장윤호

init

Showing 106 changed files with 9122 additions and 0 deletions
import torch
import torch.nn as nn
from model import mobilenetv3
from PyQt5.QtCore import pyqtSignal, QThread
from PyQt5 import QtCore, QtGui, QtWidgets
from train import UI_train
from test import UI_validate, UI_test, UI_temp
import threading, _thread
import time
import os
from queue import Queue
import multiprocessing
import logging
threads = []
logger = logging.getLogger('Techwing_log')
######### 눈금까지 기재된 커스터마이징 슬라이더 #########
class LabeledSlider(QtWidgets.QWidget):
    """QSlider wrapper that paints a text label at every tick mark.

    Supports horizontal and vertical orientation; margins are grown on the
    fly during painting so labels are never clipped.
    """

    def __init__(self, minimum=1, maximum=11, start_value=4, interval=1, orientation=QtCore.Qt.Horizontal,
            labels=None, p0=4, parent=None):
        super(LabeledSlider, self).__init__(parent=parent)
        # One tick per `interval` step, inclusive of `maximum`.
        levels=range(minimum, maximum + interval, interval)
        if labels is not None:
            if not isinstance(labels, (tuple, list)):
                raise Exception("<labels> is a list or tuple.")
            if len(labels) != len(levels):
                raise Exception("Size of <labels> doesn't match levels.")
            self.levels=list(zip(levels,labels))
        else:
            # Default labels are the numeric tick values themselves.
            self.levels=list(zip(levels,map(str,levels)))
        if orientation==QtCore.Qt.Horizontal:
            self.layout=QtWidgets.QVBoxLayout(self)
        elif orientation==QtCore.Qt.Vertical:
            self.layout=QtWidgets.QHBoxLayout(self)
        else:
            raise Exception("<orientation> wrong.")
        # gives some space to print labels
        self.left_margin=10
        self.top_margin=10
        self.right_margin=10
        self.bottom_margin=10
        self.layout.setContentsMargins(self.left_margin,self.top_margin,
                self.right_margin,self.bottom_margin)
        self.sl=QtWidgets.QSlider(orientation, self)
        self.sl.setMinimum(minimum)
        self.sl.setMaximum(maximum)
        self.sl.setValue(start_value)
        self.sl.setSliderPosition(p0)
        if orientation==QtCore.Qt.Horizontal:
            self.sl.setTickPosition(QtWidgets.QSlider.TicksBelow)
            self.sl.setMinimumWidth(300) # just to make it easier to read
        else:
            self.sl.setTickPosition(QtWidgets.QSlider.TicksLeft)
            self.sl.setMinimumHeight(80) # just to make it easier to read
        self.sl.setTickInterval(interval)
        self.sl.setSingleStep(1)
        self.layout.addWidget(self.sl)

    def paintEvent(self, e):
        """Paint the widget normally, then draw one label per tick value."""
        super(LabeledSlider,self).paintEvent(e)
        style=self.sl.style()
        painter=QtGui.QPainter(self)
        st_slider=QtWidgets.QStyleOptionSlider()
        st_slider.initFrom(self.sl)
        st_slider.orientation=self.sl.orientation()
        length=style.pixelMetric(QtWidgets.QStyle.PM_SliderLength, st_slider, self.sl)
        available=style.pixelMetric(QtWidgets.QStyle.PM_SliderSpaceAvailable, st_slider, self.sl)
        for v, v_str in self.levels:
            # get the size of the label (TextDontPrint measures without drawing)
            rect=painter.drawText(QtCore.QRect(), QtCore.Qt.TextDontPrint, v_str)
            if self.sl.orientation()==QtCore.Qt.Horizontal:
                # I assume the offset is half the length of slider, therefore
                # + length//2
                x_loc=QtWidgets.QStyle.sliderPositionFromValue(self.sl.minimum(),
                        self.sl.maximum(), v, available)+length//2
                # left bound of the text = center - half of text width + L_margin
                left=x_loc-rect.width()//2+self.left_margin
                bottom=self.rect().bottom()
                # enlarge margins if clipping
                if v==self.sl.minimum():
                    if left<=0:
                        self.left_margin=rect.width()//2-x_loc
                    if self.bottom_margin<=rect.height():
                        self.bottom_margin=rect.height()
                    self.layout.setContentsMargins(self.left_margin,
                            self.top_margin, self.right_margin,
                            self.bottom_margin)
                if v==self.sl.maximum() and rect.width()//2>=self.right_margin:
                    self.right_margin=rect.width()//2
                    self.layout.setContentsMargins(self.left_margin,
                            self.top_margin, self.right_margin,
                            self.bottom_margin)
            else:
                y_loc=QtWidgets.QStyle.sliderPositionFromValue(self.sl.minimum(),
                        self.sl.maximum(), v, available, upsideDown=True)
                bottom=y_loc+length//2+rect.height()//2+self.top_margin-3
                # there is a 3 px offset that I can't attribute to any metric
                left=self.left_margin-rect.width()
                if left<=0:
                    self.left_margin=rect.width()+2
                    self.layout.setContentsMargins(self.left_margin,
                            self.top_margin, self.right_margin,
                            self.bottom_margin)
            pos=QtCore.QPoint(left, bottom)
            painter.drawText(pos, v_str)
        return
class BaseThread(threading.Thread):
    """Thread that runs *target* and then invokes *callback* when it finishes.

    Used by the UI so buttons can be re-enabled after a worker completes.

    Args:
        callback: optional callable invoked after the target returns.
        callback_args: positional args for *callback*; may be omitted.
        target (kwarg): the real work function, wrapped so the callback fires.
    """

    def __init__(self, callback=None, callback_args=None, *args, **kwargs):
        target = kwargs.pop('target')
        super(BaseThread, self).__init__(target=self.target_with_callback, *args, **kwargs)
        self.callback = callback
        self.method = target
        # Bug fix: a callback supplied without callback_args used to crash
        # with TypeError when unpacking None; default to an empty tuple.
        self.callback_args = callback_args if callback_args is not None else ()

    def target_with_callback(self, *args, **kwargs):
        """Run the wrapped target, then the callback (if any)."""
        self.method(*args, **kwargs)
        if self.callback is not None:
            self.callback(*self.callback_args)
# dialog log창 핸들러
class QTextEditLogger(logging.Handler):
    """Logging handler that mirrors log records into a read-only QTextEdit."""

    def __init__(self, parent):
        super().__init__()
        self.widget = QtWidgets.QTextEdit()
        self.widget.setReadOnly(True)
        parent.addWidget(self.widget)

    def emit(self, record):
        """Append the formatted record and keep the view scrolled to the end."""
        text = self.format(record)
        self.widget.append(text)
        QtGui.QGuiApplication.processEvents()
        self.widget.moveCursor(QtGui.QTextCursor.End)
# Adding dialog for closeevent.
class Dialog_form(QtWidgets.QDialog):
    """QDialog subclass whose close event also interrupts the main thread."""

    def __init__(self, parent=None):
        super().__init__(parent)

    def closeEvent(self, evnt):
        """Let Qt finish its normal teardown, then raise KeyboardInterrupt
        in the main thread so background worker loops stop."""
        super().closeEvent(evnt)
        _thread.interrupt_main()
# main Dialog
class Ui_Dialog(QtWidgets.QWidget):
    """Main control panel of the defect-classification tool.

    Builds every widget, loads a default MobileNetV3 checkpoint, and wires
    the Train / Validate / Test buttons to worker threads so the GUI stays
    responsive while the model runs.
    """

    def setupUi(self, Dialog):
        ######### Default values #########
        self.mode = "Error"          # active task: "Error" | "Type" | "All"
        self.q = Queue()             # messages drained by the log thread (see write())
        self.use_checkpoint=False
        ######### Default model, so inference can be sanity-checked right away #########
        self.model = mobilenetv3(n_class=2, blocknum=4, dropout=0.5)
        if torch.cuda.is_available():
            torch.cuda.set_device(0)
            with torch.cuda.device(0):
                self.model = self.model.cuda()
                # DataParallel gathers gradients on one device, which usually
                # overloads GPU 0 and limits the batch size; output_device is
                # set explicitly to keep that allocation predictable.
                self.model = torch.nn.DataParallel(self.model, device_ids=[0], output_device=[0])
                # The data set is small, so everything is handled on GPU 0.
                checkpoint = torch.load("output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar")
        else:
            self.model = torch.nn.DataParallel(self.model)
            device = torch.device("cpu")
            self.model.to(device)
            checkpoint = torch.load("output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar", map_location=torch.device('cpu'))
        self.model.load_state_dict(checkpoint['state_dict'])
        ######### Dialog setup plus the log / button frames #########
        Dialog.resize(1500, 900)
        Dialog.setObjectName("Dialog")
        hbox = QtWidgets.QHBoxLayout(Dialog)
        logframe = QtWidgets.QFrame(self)
        buttonframe = QtWidgets.QFrame(self)
        logframe.setFrameShape(QtWidgets.QFrame.StyledPanel)
        buttonframe.setFrameShape(QtWidgets.QFrame.StyledPanel)
        logLayout = QtWidgets.QVBoxLayout()
        buttonLayout = QtWidgets.QVBoxLayout()
        ######### Action buttons #########
        # Train
        self.pushButton = QtWidgets.QPushButton()
        self.pushButton.setFixedHeight(50)
        # Validation
        self.pushButton_2 = QtWidgets.QPushButton()
        self.pushButton_2.setFixedHeight(50)
        # Test (dir)
        self.pushButton_3 = QtWidgets.QPushButton()
        self.pushButton_3.setFixedHeight(50)
        # Test (file)
        self.pushButton_4 = QtWidgets.QPushButton("Test (file)")
        self.pushButton_4.setFixedHeight(50)
        # Temp Test for consistent model
        self.pushButton_5 = QtWidgets.QPushButton("Temp Test")
        self.pushButton_5.setFixedHeight(50)
        ######### Row of model-action buttons #########
        model_control_layout = QtWidgets.QHBoxLayout()
        model_control_layout.addWidget(self.pushButton)
        model_control_layout.addWidget(self.pushButton_2)
        model_control_layout.addWidget(self.pushButton_3)
        model_control_layout.addWidget(self.pushButton_4)
        #model_control_layout.addWidget(self.pushButton_5)
        self.model_control_container = QtWidgets.QWidget()
        self.model_control_container.setLayout(model_control_layout)
        self.model_control_container.setFixedHeight(60)
        ######### DATA PATH widgets (directory) #########
        self.dirpathlabel = QtWidgets.QLabel("data path (dir):")
        self.dirselectedpath = QtWidgets.QLineEdit("no data")
        self.dirselectedpath.setReadOnly(True)
        self.data_dir_select_btn = QtWidgets.QPushButton("...")
        self.dirpathlayout = QtWidgets.QHBoxLayout()
        self.dirpathlayout.addWidget(self.dirpathlabel)
        self.dirpathlayout.addWidget(self.dirselectedpath)
        self.dirpathlayout.addWidget(self.data_dir_select_btn)
        self.dirpathcontainer = QtWidgets.QWidget()
        self.dirpathcontainer.setLayout(self.dirpathlayout)
        self.dirpathcontainer.setFixedHeight(40)
        ######### DATA PATH widgets (single file) #########
        self.filepathlabel = QtWidgets.QLabel("data path (file):")
        self.fileselectedpath = QtWidgets.QLineEdit("no data")
        self.fileselectedpath.setReadOnly(True)
        self.data_file_select_btn = QtWidgets.QPushButton("...")
        self.filepathlayout = QtWidgets.QHBoxLayout()
        self.filepathlayout.addWidget(self.filepathlabel)
        self.filepathlayout.addWidget(self.fileselectedpath)
        self.filepathlayout.addWidget(self.data_file_select_btn)
        self.filepathcontainer = QtWidgets.QWidget()
        self.filepathcontainer.setLayout(self.filepathlayout)
        self.filepathcontainer.setFixedHeight(40)
        ######### CHECKPOINT PATH widgets #########
        self.ck_pathlabel = QtWidgets.QLabel("checkpoint path :")
        self.ck_selectedpath = QtWidgets.QLineEdit("no checkpoint")
        self.ck_selectedpath.setReadOnly(True)
        self.ck_select_btn = QtWidgets.QPushButton("...")
        self.ck_pathlayout = QtWidgets.QHBoxLayout()
        self.ck_pathlayout.addWidget(self.ck_pathlabel)
        self.ck_pathlayout.addWidget(self.ck_selectedpath)
        self.ck_pathlayout.addWidget(self.ck_select_btn)
        self.ck_pathcontainer = QtWidgets.QWidget()
        self.ck_pathcontainer.setLayout(self.ck_pathlayout)
        self.ck_pathcontainer.setFixedHeight(40)
        ######### Block-count slider #########
        self.blocknum_slider = LabeledSlider()
        ######### Group the path widgets #########
        self.path_groupbox = QtWidgets.QGroupBox("경로")
        self.path_layout = QtWidgets.QVBoxLayout()
        self.path_layout.addWidget(self.dirpathcontainer)
        self.path_layout.addWidget(self.filepathcontainer)
        self.path_layout.addWidget(self.ck_pathcontainer)
        self.path_groupbox.setLayout(self.path_layout)
        self.path_groupbox.setFixedHeight(140)
        ######### Group the model-parameter widgets #########
        self.model_groupbox = QtWidgets.QGroupBox("모델 블록")
        self.blocknum_layout = QtWidgets.QVBoxLayout()
        self.blocknum_layout.addWidget(self.blocknum_slider)
        self.model_groupbox.setLayout(self.blocknum_layout)
        self.model_groupbox.setFixedHeight(80)
        ######### Task-selection radio buttons ######### (All, Error, ErrorType)
        self.modelayout = QtWidgets.QHBoxLayout()
        self.Errorbtn = QtWidgets.QRadioButton("에러 검출")
        self.Typebtn = QtWidgets.QRadioButton("에러 타입")
        self.Allbtn = QtWidgets.QRadioButton("전체 타입 검출")
        self.Errorbtn.setChecked(True)
        self.modelayout.addWidget(self.Errorbtn)
        self.modelayout.addWidget(self.Typebtn)
        self.modelayout.addWidget(self.Allbtn)
        self.modecontainer = QtWidgets.QGroupBox("모델 종류")
        self.modecontainer.setLayout(self.modelayout)
        self.modecontainer.setFixedHeight(70)
        ######### Training hyper-parameter widgets #########
        self.train_parameters_layout = QtWidgets.QHBoxLayout()
        self.epoch_label = QtWidgets.QLabel("Epoch :")
        self.epoch_input = QtWidgets.QLineEdit("3000")
        self.epoch_input.setValidator(QtGui.QIntValidator(1,3001))
        self.optim_label = QtWidgets.QLabel("Optim :")
        self.optim_input = QtWidgets.QComboBox()
        self.optim_input.addItem("SGD")
        self.optim_input.addItem("Adam")
        self.optim_input.setFixedWidth(100)
        self.lr_label = QtWidgets.QLabel("Learning rate :")
        self.lr_input = QtWidgets.QLineEdit("0.001")
        # NOTE(review): QDoubleValidator signature is (bottom, top, decimals);
        # the bounds here look swapped (999999, -999999) — confirm intent.
        self.lr_input.setValidator(QtGui.QDoubleValidator(999999, -999999, 8))
        self.batch_label = QtWidgets.QLabel("batch size :")
        self.batch_input = QtWidgets.QLineEdit("256")
        self.batch_input.setValidator(QtGui.QIntValidator(1,1025))
        self.imagesize_label = QtWidgets.QLabel("Image size :")
        self.imagesize_input = QtWidgets.QLineEdit("64")
        self.imagesize_input.setValidator(QtGui.QIntValidator(1,1025))
        self.train_parameters_layout.addWidget(self.epoch_label)
        self.train_parameters_layout.addWidget(self.epoch_input)
        self.train_parameters_layout.addWidget(self.optim_label)
        self.train_parameters_layout.addWidget(self.optim_input)
        self.train_parameters_layout.addWidget(self.lr_label)
        self.train_parameters_layout.addWidget(self.lr_input)
        self.train_parameters_layout.addWidget(self.batch_label)
        self.train_parameters_layout.addWidget(self.batch_input)
        self.train_parameters_layout.addWidget(self.imagesize_label)
        self.train_parameters_layout.addWidget(self.imagesize_input)
        self.train_parameters = QtWidgets.QGroupBox("학습 파라미터")
        self.train_parameters.setLayout(self.train_parameters_layout)
        self.train_parameters.setFixedHeight(60)
        ######### Add the assembled containers to the dialog #########
        buttonLayout.addWidget(self.model_control_container)
        buttonLayout.addWidget(self.path_groupbox)
        buttonLayout.addWidget(self.model_groupbox)
        buttonLayout.addWidget(self.train_parameters)
        buttonLayout.addWidget(self.modecontainer)
        ######### On-screen logger format #########
        # (separate logger from the file logger, so on-screen and saved logs differ)
        logTextBox = QTextEditLogger(logLayout)
        logTextBox.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
        logging.getLogger('Techwing_log').addHandler(logTextBox)
        logging.getLogger('Techwing_log').setLevel(logging.INFO)
        ######### Size ratio between the log pane and the button pane #########
        logframe.setLayout(logLayout)
        buttonframe.setLayout(buttonLayout)
        splitter = QtWidgets.QSplitter(QtCore.Qt.Horizontal)
        splitter.addWidget(logframe)
        splitter.addWidget(buttonframe)
        splitter.setSizes([600,200])
        hbox.addWidget(splitter)
        Dialog.setLayout(hbox)
        QtWidgets.QApplication.setStyle(QtWidgets.QStyleFactory.create('Cleanlooks'))
        # Button captions
        self.retranslateUi(Dialog)
        ######### Signal wiring #########
        self.Errorbtn.clicked.connect(self.modeBtnClicked)
        self.Typebtn.clicked.connect(self.modeBtnClicked)
        self.Allbtn.clicked.connect(self.modeBtnClicked)
        self.pushButton.clicked.connect(self.Train_btn_clicked)
        self.pushButton_2.clicked.connect(self.Val_btn_clicked)
        self.pushButton_3.clicked.connect(lambda: self.Test_btn_clicked('dir'))
        self.pushButton_4.clicked.connect(lambda: self.Test_btn_clicked('file'))
        self.pushButton_5.clicked.connect(self.temp_btn_clicked)
        self.data_dir_select_btn.clicked.connect(self.Path_btn_clicked)
        self.data_file_select_btn.clicked.connect(self.filePath_btn_clicked)
        self.ck_select_btn.clicked.connect(self.checkpoint_btn_clicked)
        ######### Daemon thread that feeds queued messages into the log #########
        c = threading.Thread(target=self.write, args=(self.q,), daemon=True)
        c.start()
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    ######### Button captions #########
    def retranslateUi(self, Dialog):
        # pushButton_4 / pushButton_5 captions were already set in setupUi.
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.pushButton.setText(_translate("Dialog", "Train"))
        self.pushButton_2.setText(_translate("Dialog", "Validate"))
        self.pushButton_3.setText(_translate("Dialog", "Test (dir)"))

    ######### Radio-button (task) handler #########
    def modeBtnClicked(self):
        """Sync the block-count slider and image-size defaults with the task."""
        if self.Errorbtn.isChecked():
            self.q.put("set error")
            self.mode = "Error"
            self.blocknum_slider.sl.setValue(4)
            self.blocknum_slider.sl.setSliderPosition(4)
            self.imagesize_input.setText("64")
        elif self.Typebtn.isChecked():
            self.q.put("set Type")
            self.mode = "Type"
            self.blocknum_slider.sl.setValue(4)
            self.blocknum_slider.sl.setSliderPosition(4)
            self.imagesize_input.setText("64")
        else:
            self.q.put("set All processing")
            self.mode = "All"
            self.blocknum_slider.sl.setValue(6)
            self.blocknum_slider.sl.setSliderPosition(6)
            self.imagesize_input.setText("224")

    ######### Train button handler #########
    def Train_btn_clicked(self):
        """Collect the UI hyper-parameters and launch training in a worker thread."""
        if self.dirselectedpath.text() != "no data":
            self.set_all_btn_enabled(False)
            logging.info("train start")
            blocknum = self.blocknum_slider.sl.value()
            kwargs = {"resume": self.use_checkpoint, "blocknum": blocknum}
            kwargs["data_path"] = self.dirselectedpath.text()
            kwargs["epoch"] = int(self.epoch_input.text())
            kwargs["lr"] = float(self.lr_input.text())
            kwargs["batch_size"] = int(self.batch_input.text())
            kwargs["optim"] = str(self.optim_input.currentText())
            kwargs["size"] = int(self.imagesize_input.text())
            if self.use_checkpoint:
                kwargs["ck_path"] = self.ck_selectedpath.text()
            # Callback re-enables the buttons once training finishes.
            t = BaseThread(target=UI_train, callback=self.set_all_btn_enabled, callback_args=(True,),
                    args=(self.mode, self.q), kwargs=kwargs)
            threads.append(t)
            t.start()
            self.q.join()
        else:
            self.q.put("데이터를 입력해 주세요.")

    ######### Test button handler #########
    def Test_btn_clicked(self, file_mode):
        """Run inference on a directory ('dir') or a single file ('file')."""
        if self.use_checkpoint:
            self.set_all_btn_enabled(False)
            logging.info('Test start')
            blocknum = self.blocknum_slider.sl.value()
            kwargs = {"use_ck": self.use_checkpoint, "blocknum": blocknum}
            kwargs["size"] = int(self.imagesize_input.text())
            kwargs["ck_path"] = self.ck_selectedpath.text()
            logging.info(f"start test using path : {self.ck_selectedpath.text()}")
            if file_mode == 'dir':
                if self.dirselectedpath.text() != "no data":
                    t = BaseThread(target=UI_test, callback=self.set_all_btn_enabled, callback_args=(True,)
                            ,args=(self.mode, self.dirselectedpath.text(), file_mode, self.q), kwargs=kwargs)
                    t.start()
                    self.q.join()
                else:
                    self.q.put("데이터를 입력해 주세요.")
                    self.set_all_btn_enabled(True)
            else:
                if self.fileselectedpath.text() != "no data":
                    t = BaseThread(target=UI_test, callback=self.set_all_btn_enabled, callback_args=(True,)
                            ,args=(self.mode, self.fileselectedpath.text(), file_mode, self.q), kwargs=kwargs)
                    t.start()
                    self.q.join()
                else:
                    self.q.put("데이터를 입력해 주세요.")
                    self.set_all_btn_enabled(True)
        else:
            self.q.put("체크포인트를 입력해 주세요.")

    ######### Validation button handler #########
    ## Requires both a checkpoint and a data directory to be selected.
    def Val_btn_clicked(self):
        if self.use_checkpoint:
            if self.dirselectedpath.text() != "no data":
                self.set_all_btn_enabled(False)
                blocknum = self.blocknum_slider.sl.value()
                kwargs = {"blocknum": blocknum}
                kwargs["data_path"] = self.dirselectedpath.text()
                kwargs["size"] = int(self.imagesize_input.text())
                kwargs["ck_path"] = self.ck_selectedpath.text()
                logging.info('val start')
                t = BaseThread(target=UI_validate, callback=self.set_all_btn_enabled, callback_args=(True,),
                        args=(self.mode, self.q), kwargs=kwargs)
                t.start()
                self.q.join()
            else:
                self.q.put("데이터를 입력해 주세요.")
        else:
            self.q.put("체크포인트를 입력해 주세요.")

    def temp_btn_clicked(self):
        """Ad-hoc single-file test against the in-memory default model."""
        self.set_all_btn_enabled(False)
        t = BaseThread(target=UI_temp, callback=self.set_all_btn_enabled, callback_args=(True,),
                args=(self.fileselectedpath.text(), self.q, self.model.module))
        t.start()
        self.q.join()

    ######### Data-directory picker #########
    def Path_btn_clicked(self):
        fname = QtWidgets.QFileDialog.getExistingDirectory(self, 'Open dir')
        if len(fname) != 0:
            logging.info(f"{fname} Test dir submitted")
            self.dirselectedpath.setText(fname)
        else:
            QtWidgets.QMessageBox.about(self, "Warning", "Do not select Directory!")

    ######### Data-file picker #########
    def filePath_btn_clicked(self):
        fname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', "",
                "All Files(*);; Bitmap files(*.bmp);; Jpg files(*.jpg);; Png files(*.png)")
        if fname[0]:
            logging.info(f"{fname[0]} test file submitted")
            self.fileselectedpath.setText(fname[0])
        else:
            QtWidgets.QMessageBox.about(self, "Warning", "do not select file!")

    ######### Checkpoint-file picker #########
    def checkpoint_btn_clicked(self):
        fname = QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', "",
                "All Files(*)")
        if fname[0]:
            # NOTE(review): this stores the whole (path, filter) tuple, while the
            # line edit below keeps only fname[0] — confirm which one is consumed.
            self.ck_path = fname
            self.use_checkpoint = True
            logging.info(f"{fname[0]} checkpoint file submitted")
            self.ck_selectedpath.setText(fname[0])
        else:
            QtWidgets.QMessageBox.about(self, "Warning", "do not select file!")

    ######### Disable/enable all buttons while the model is running #########
    def set_all_btn_enabled(self, mode):
        """Enable (True) or disable (False) every action and picker button."""
        self.pushButton.setEnabled(mode)
        self.pushButton_2.setEnabled(mode)
        self.pushButton_3.setEnabled(mode)
        self.pushButton_4.setEnabled(mode)
        self.data_dir_select_btn.setEnabled(mode)
        self.data_file_select_btn.setEnabled(mode)
        self.ck_select_btn.setEnabled(mode)

    ######### Feeds queued messages into the dialog log window #########
    def write(self, q):
        """Daemon loop: forward every queued message to the GUI logger."""
        while True:
            try:
                log = q.get()
                logger.info(log)
                q.task_done()
            # NOTE(review): queue.Queue has no ``Empty`` attribute (it lives on
            # the ``queue`` module), so this handler would itself fail if ever
            # reached; the blocking q.get() above never raises Empty, though.
            except Queue.Empty:
                pass
if __name__ == "__main__":
    import sys
    # NOTE(review): multiprocessing is already imported at module level;
    # the re-import here is harmless.
    import multiprocessing
    # Required when packaged as a frozen Windows executable.
    multiprocessing.freeze_support()
    app = QtWidgets.QApplication(sys.argv)
    Dialog = Dialog_form()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    sys.exit(app.exec_())
File mode changed
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
No preview for this file type
# code in this file is adpated from rpmcruz/autoaugment
# https://github.com/rpmcruz/autoaugment/blob/master/transformations.py
import random
import PIL, PIL.ImageOps, PIL.ImageEnhance, PIL.ImageDraw
import numpy as np
import torch
from PIL import Image
def ShearX(img, v):  # [-0.3, 0.3]
    """Apply a horizontal shear of magnitude ``v`` with random sign."""
    assert -0.3 <= v <= 0.3
    v = -v if random.random() > 0.5 else v
    return img.transform(img.size, PIL.Image.AFFINE, (1, v, 0, 0, 1, 0))
def ShearY(img, v):  # [-0.3, 0.3]
    """Apply a vertical shear of magnitude ``v`` with random sign."""
    assert -0.3 <= v <= 0.3
    v = -v if random.random() > 0.5 else v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, v, 1, 0))
def TranslateX(img, v):  # percentage of width: [-0.45, 0.45]
    """Translate horizontally by ``v`` × image-width pixels, random sign."""
    assert -0.45 <= v <= 0.45
    v = -v if random.random() > 0.5 else v
    shift = v * img.size[0]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, shift, 0, 1, 0))
def TranslateXabs(img, v):  # absolute pixels, v >= 0
    """Translate horizontally by ``v`` pixels, direction chosen at random."""
    assert 0 <= v
    v = -v if random.random() > 0.5 else v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, v, 0, 1, 0))
def TranslateY(img, v):  # percentage of height: [-0.45, 0.45]
    """Translate vertically by ``v`` × image-height pixels, random sign."""
    assert -0.45 <= v <= 0.45
    v = -v if random.random() > 0.5 else v
    shift = v * img.size[1]
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, shift))
def TranslateYabs(img, v):  # absolute pixels, v >= 0
    """Translate vertically by ``v`` pixels, direction chosen at random."""
    assert 0 <= v
    v = -v if random.random() > 0.5 else v
    return img.transform(img.size, PIL.Image.AFFINE, (1, 0, 0, 0, 1, v))
def Rotate(img, v):  # [-30, 30]
    """Rotate by up to ±30 degrees; the direction is random."""
    assert -30 <= v <= 30
    v = -v if random.random() > 0.5 else v
    return img.rotate(v)
def AutoContrast(img, _):
    """Maximize image contrast; the magnitude argument is ignored."""
    return PIL.ImageOps.autocontrast(img)
def Invert(img, _):
    """Invert pixel values; the magnitude argument is ignored."""
    return PIL.ImageOps.invert(img)
def Equalize(img, _):
    """Equalize the image histogram; the magnitude argument is ignored."""
    return PIL.ImageOps.equalize(img)
def Flip(img, _):  # not from the paper
    """Mirror the image horizontally; the magnitude argument is ignored."""
    return PIL.ImageOps.mirror(img)
def Solarize(img, v):  # [0, 256]
    """Invert all pixels above threshold ``v``."""
    assert 0 <= v <= 256
    return PIL.ImageOps.solarize(img, v)
def SolarizeAdd(img, addition=0, threshold=128):
    """Add ``addition`` to every pixel (clipped to [0, 255]), then solarize.

    Bug fix: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    the builtin ``int`` is the documented replacement and widens the dtype
    so the addition cannot wrap around before clipping.
    """
    img_np = np.array(img).astype(int)
    img_np = np.clip(img_np + addition, 0, 255).astype(np.uint8)
    img = Image.fromarray(img_np)
    return PIL.ImageOps.solarize(img, threshold)
def Posterize(img, v):  # [4, 8]
    """Reduce to ``v`` bits per channel, keeping at least 1 bit."""
    return PIL.ImageOps.posterize(img, max(1, int(v)))
def Contrast(img, v):  # [0.1, 1.9]
    """Scale image contrast by factor ``v``."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Contrast(img).enhance(v)
def Color(img, v):  # [0.1, 1.9]
    """Scale color saturation by factor ``v``."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Color(img).enhance(v)
def Brightness(img, v):  # [0.1, 1.9]
    """Scale image brightness by factor ``v``."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Brightness(img).enhance(v)
def Sharpness(img, v):  # [0.1, 1.9]
    """Scale image sharpness by factor ``v``."""
    assert 0.1 <= v <= 1.9
    return PIL.ImageEnhance.Sharpness(img).enhance(v)
def Cutout(img, v):  # fraction of width: [0, 0.2]
    """Erase a random square whose side is ``v`` × image width."""
    assert 0.0 <= v <= 0.2
    return CutoutAbs(img, v * img.size[0]) if v > 0. else img
def CutoutAbs(img, v):
    """Erase a ``v`` × ``v`` gray patch centered at a random location."""
    if v < 0:
        return img
    w, h = img.size
    cx = np.random.uniform(w)
    cy = np.random.uniform(h)
    x0 = int(max(0, cx - v / 2.))
    y0 = int(max(0, cy - v / 2.))
    box = (x0, y0, min(w, x0 + v), min(h, y0 + v))
    # Fill with a mid-gray (roughly the ImageNet mean color).
    patched = img.copy()
    PIL.ImageDraw.Draw(patched).rectangle(box, (125, 123, 114))
    return patched
def SamplePairing(imgs):  # [0, 0.4]
    """Return an op that blends its input with a random image from ``imgs``."""
    def blend_op(img1, v):
        idx = np.random.choice(len(imgs))
        partner = PIL.Image.fromarray(imgs[idx])
        return PIL.Image.blend(img1, partner, v)
    return blend_op
def Identity(img, v):
    """No-op augmentation: return the image unchanged (magnitude ignored)."""
    return img
def augment_list():
    """Return the 16 RandAugment operations as (op, min_val, max_val) triples.

    Ranges follow the EfficientNet AutoAugment implementation:
    https://github.com/tensorflow/tpu/blob/8462d083dd89489a79e3200bcc8d4063bf362186/models/official/efficientnet/autoaugment.py#L505
    (an earlier UDA-style table, https://github.com/google-research/uda/blob/master/image/randaugment/policies.py#L57,
    was used previously).
    """
    return [
        (AutoContrast, 0, 1),
        (Equalize, 0, 1),
        (Invert, 0, 1),
        (Rotate, 0, 30),
        (Posterize, 0, 4),
        (Solarize, 0, 256),
        (SolarizeAdd, 0, 110),
        (Color, 0.1, 1.9),
        (Contrast, 0.1, 1.9),
        (Brightness, 0.1, 1.9),
        (Sharpness, 0.1, 1.9),
        (ShearX, 0., 0.3),
        (ShearY, 0., 0.3),
        (CutoutAbs, 0, 40),
        (TranslateXabs, 0., 100),
        (TranslateYabs, 0., 100),
    ]
class Lighting(object):
    """Lighting noise (AlexNet-style PCA-based color jitter)."""

    def __init__(self, alphastd, eigval, eigvec):
        self.alphastd = alphastd
        self.eigval = torch.Tensor(eigval)
        self.eigvec = torch.Tensor(eigvec)

    def __call__(self, img):
        """Add per-channel PCA-direction noise to a (3, H, W) tensor image."""
        if self.alphastd == 0:
            return img
        # Random weight per principal component.
        alpha = img.new().resize_(3).normal_(0, self.alphastd)
        rgb_shift = (self.eigvec.type_as(img).clone()
                     .mul(alpha.view(1, 3).expand(3, 3))
                     .mul(self.eigval.view(1, 3).expand(3, 3))
                     .sum(1).squeeze())
        return img.add(rgb_shift.view(3, 1, 1).expand_as(img))
class CutoutDefault(object):
    """Zero out one random square patch of a CHW tensor image (in place).

    Reference: https://github.com/quark0/darts/blob/master/cnn/utils.py
    """

    def __init__(self, length):
        self.length = length

    def __call__(self, img):
        h, w = img.size(1), img.size(2)
        cy, cx = np.random.randint(h), np.random.randint(w)
        half = self.length // 2
        y1, y2 = np.clip(cy - half, 0, h), np.clip(cy + half, 0, h)
        x1, x2 = np.clip(cx - half, 0, w), np.clip(cx + half, 0, w)
        mask = np.ones((h, w), np.float32)
        mask[y1:y2, x1:x2] = 0.
        img *= torch.from_numpy(mask).expand_as(img)
        return img
class RandAugment:
    """Apply ``n`` randomly chosen augmentations at a shared magnitude ``m``."""

    def __init__(self, n, m):
        self.n = n  # number of ops applied per image
        self.m = m  # shared magnitude in [0, 30]
        self.augment_list = augment_list()

    def __call__(self, img):
        for op, lo, hi in random.choices(self.augment_list, k=self.n):
            # Rescale the global magnitude into this op's own value range.
            magnitude = (float(self.m) / 30) * float(hi - lo) + lo
            img = op(img, magnitude)
        return img
\ No newline at end of file
task: All
modelname: MobilenetV3
output: output
checkpoint: "output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar"
gpu: [2]
data:
train: ../data/Fifth_data/All
val: ../data/Fifth_data/All
test: ../data/Fifth_data/All
train:
epochs: 3000
start-epoch: 0
batch-size: 256
worker: 16
resume: ''
augment: True
size: 224
confidence: False
weight: [1., 1., 1., 1., 1., 1., 1., 1.] #Crack, Double, Empty, Flip, Leave, Normal, Pollute, Scratch
predict:
batch-size: 256
worker: 16
cam: False
normalize: True
save: False
optimizer:
type: 'Adam'
lr: 0.001
momentum: 0.9
weight_decay: 0.0001
loss:
gamma: 2.
alpha: 0.8
model:
blocks: 6
class: 8
etc:
tensorboard: False
print_freq: 10
\ No newline at end of file
task: Type
modelname: MobilenetV3
output: output
checkpoint: "output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar"
gpu: [1]
data:
train: ../data/Fifth_data/ErrorType
val: ../data/Fifth_data/ErrorType
test: ../data/Fifth_data/ErrorType
train:
epochs: 3000
start-epoch: 0
batch-size: 256
worker: 16
resume: ''
augment: True
size: 64
confidence: False
weight: [1., 1., 1., 1., 1., 1., 1.] #Crack, Double, Empty, Flip, Leave, Pollute, Scratch — one weight per class (7 classes)
predict:
batch-size: 256
worker: 16
cam: False
normalize: True
save: False
optimizer:
type: 'SGD'
lr: 0.1
momentum: 0.9
weight_decay: 0.0001
loss:
gamma: 2.
alpha: 0.8
model:
blocks: 4
class: 7
etc:
tensorboard: False
print_freq: 10
\ No newline at end of file
task: Error
modelname: MobilenetV3
output: output
checkpoint: "output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar"
gpu: [1]
data:
train: ../data/Fifth_data/Error
val: ../data/Fifth_data/Error
test: ../data/Fifth_data/Error
train:
epochs: 3000
start-epoch: 0
batch-size: 256
worker: 16
resume: ''
augment: True
size: 64
confidence: False
weight: [1., 1.] #Error , Normal
predict:
batch-size: 256
worker: 16
cam: False
cam-class: "Error"
normalize: True
save: False
optimizer:
type: 'SGD'
lr: 0.1
momentum: 0.9
weight_decay: 0.0001
loss:
gamma: 2.
alpha: 0.8
model:
blocks: 4
class: 2
etc:
tensorboard: False
print_freq: 10
\ No newline at end of file
import torch
import torchvision
import torch.nn as nn
import argparse
from model import AutoEncoder, pytorch_autoencoder
from get_mean_std import get_params
from torchvision.utils import save_image
# Evaluate a trained autoencoder on defect data and save reconstruction
# residuals; large residuals highlight anomalies (e.g. scratches).
parser = argparse.ArgumentParser(description='Process autoencoder')
parser.add_argument('--config', type=str, help='select type')
args = parser.parse_args()

# Data set containing only Scratch samples
data_path = "../data/Fourth_data/Auto_test"
checkpoint_path = "./dc_img/checkpoint.pth"
resize_size = 128
batch_size = 128

# Either the report-based autoencoder or the PyTorch reference one
if args.config == "my":
    model = AutoEncoder().cuda("cuda:1")
else:
    model = pytorch_autoencoder().cuda("cuda:1")
checkpoint = torch.load(checkpoint_path)
model.load_state_dict(checkpoint)
print("checkpoint loaded finish!")

img_transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((resize_size, resize_size)),
    torchvision.transforms.Grayscale(),
    torchvision.transforms.ToTensor(),
])
dataset = torchvision.datasets.ImageFolder(data_path, transform=img_transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False)
criterion = nn.L1Loss()
for idx, data in enumerate(dataloader):
    img, _ = data
    img = img.cuda("cuda:1")
    output = model(img)
    save_image(output, f'./dc_img/test_output_{idx}.png')
    loss = criterion(output, img)
    # Residual image: input minus reconstruction
    img = img - output
    save_image(img, f'./dc_img/scratch_dif_{idx}.png')
    print(f"loss : {loss}")
\ No newline at end of file
import torch
import torchvision
import torch.nn as nn
import argparse
from model import AutoEncoder, pytorch_autoencoder, AutoEncoder_s
from get_mean_std import get_params
from torchvision.utils import save_image
# Train an autoencoder on Normal-only data so that defective samples later
# produce large reconstruction errors.
parser = argparse.ArgumentParser(description='Process autoencoder')
parser.add_argument('--config', type=str, help='select type')
args = parser.parse_args()

# Data set containing only Normal samples
data_path = "../data/Fourth_data/Auto"
resize_size = 128
num_epochs = 100
batch_size = 128
learning_rate = 1e-3

# Report-based autoencoder, the PyTorch reference one, or the small variant
if args.config == "my":
    model = AutoEncoder().cuda("cuda:1")
elif args.config == "pytorch":
    model = pytorch_autoencoder().cuda("cuda:1")
else:
    model = AutoEncoder_s().cuda("cuda:1")
print(model)
#mean, std = get_params(data_path, resize_size)
img_transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize((resize_size, resize_size)),
    torchvision.transforms.Grayscale(),
    torchvision.transforms.ToTensor(),
])
dataset = torchvision.datasets.ImageFolder(data_path, transform=img_transform)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=True)
criterion = nn.L1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate, weight_decay=1e-4)
for epoch in range(num_epochs):
    for data in dataloader:
        img, _ = data
        img = img.cuda("cuda:1")
        output = model(img)
        loss = criterion(output, img)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Log once per epoch; save a sample image and checkpoint every 10 epochs
    print('epoch [{}/{}], loss:{:.4f}'.format(epoch+1, num_epochs, loss.item()))
    if epoch % 10 ==0:
        save_image(output, './dc_img/image_{}.png'.format(epoch))
        torch.save(model.state_dict(), './dc_img/checkpoint.pth')
\ No newline at end of file
import torch
import torch.nn as nn
import os
import shutil
import logging
from model import mobilenetv3
from utils import get_args_from_yaml
import torchvision.datasets as datasets
from utils import AverageMeter, accuracy, printlog, precision, recall
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
import time
from get_mean_std import get_params
# Fine-tuning setup.
# NOTE(review): the model is built with n_class=7 while the checkpoint path
# name says class=8 — confirm how the mismatched head is handled on load.
model = mobilenetv3(n_class=7, blocknum=6, dropout=0.5)
model = model.train()
data_path = "../data/All"
check_path = "output/All/30114_model=MobilenetV3-ep=3000-block=6-class=8/model_best.pth.tar"
validation_ratio = 0.1
random_seed = 10
gpus=[0]
epochs = 3000
resize_size=128

# Log to both the console and logs/finetune.log
logger = logging.getLogger()
logger.setLevel(logging.INFO)
streamHandler = logging.StreamHandler()
logger.addHandler(streamHandler)
fileHandler = logging.FileHandler("logs/finetune.log")
logger.addHandler(fileHandler)
def save_checkpoint(state, is_best, block =6, filename='checkpoint.pth.tar'):
    """Save a training checkpoint to output/All/ and track the best model.

    Args:
        state: dict to serialize (epoch, state_dict, best_prec1, ...).
        is_best: when True, the checkpoint is also copied to model_best.pth.tar.
        block: kept for backward compatibility; currently unused.
        filename: base file name inside the output directory.

    Returns:
        Path of the best-model file. NOTE: returned even when is_best is
        False, in which case the file may not exist yet.
    """
    directory = "%s/%s/" % ('output', 'All')
    if not os.path.exists(directory):
        os.makedirs(directory)
    filename = directory + filename
    torch.save(state, filename)
    # BUGFIX: previously logged a constant placeholder instead of the path.
    logger.info(f"Checkpoint Saved: {filename}")
    best_filename = "output/All/model_best.pth.tar"
    if is_best:
        shutil.copyfile(filename, best_filename)
        logger.info(f"New Best Checkpoint saved: {best_filename}")
    return best_filename
def validate(val_loader, model, criterion, epoch, q=None):
    """Perform validation on the validation set.

    Args:
        val_loader: DataLoader over the validation split.
        model: network to evaluate (switched to eval mode here).
        criterion: loss function.
        epoch: current epoch index, used only for logging.
        q: optional queue passed through to printlog for UI updates.

    Returns:
        (top1.avg, prec, rec): average top-1 accuracy plus the per-class
        precision and recall AverageMeter lists.
    """
    with torch.no_grad():
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        # One precision/recall meter per class (7 classes hard-coded).
        prec = []
        rec = []
        for i in range(7):
            prec.append(AverageMeter())
            rec.append(AverageMeter())

        # switch to evaluate mode
        model.eval()

        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if torch.cuda.is_available():
                target = target.cuda()
                input = input.cuda()

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1 = accuracy(output.data, target, topk=(1,))[0]
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            # Accumulate per-class precision/recall for every class index.
            for k in range(7):
                prec[k].update(precision(output.data, target, target_class=k), input.size(0))
                rec[k].update(recall(output.data, target, target_class=k), input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % 10 == 0:
                logger.info('Test: [{0}/{1}]\t'
                        'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                        'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'
                        .format(
                    i, len(val_loader), batch_time=batch_time, loss=losses,
                    top1=top1))

        # printlog writes to both the logger and the optional UI queue `q`.
        printlog(' * epoch: {epoch} Prec@1 {top1.avg:.3f}'.format(epoch=epoch,top1=top1), logger, q)
        return top1.avg, prec, rec
def train(model, train_loader, criterion, optimizer, epoch):
    """Run one training epoch over train_loader, logging loss/accuracy every 10 steps."""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    prec = AverageMeter()

    # Training mode: enable dropout / batch-norm updates.
    model.train()

    tick = time.time()
    for step, (images, labels) in enumerate(train_loader):
        if torch.cuda.is_available():
            labels = labels.cuda()
            images = images.cuda()

        # Forward pass and loss.
        logits = model(images)
        loss = criterion(logits, labels)

        # Bookkeeping: top-1 accuracy and running loss, weighted by batch size.
        top1_acc = accuracy(logits, labels, topk=(1,))[0]
        losses.update(loss.item(), images.size(0))
        top1.update(top1_acc.item(), images.size(0))

        # Backward pass and parameter update.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Wall-clock time per batch.
        batch_time.update(time.time() - tick)
        tick = time.time()

        if step % 10 == 0:
            logger.info('Epoch: [{0}][{1}/{2}]\t'
                    'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                    'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                    'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                    .format(
                epoch, step, len(train_loader), batch_time=batch_time,
                loss=losses, top1=top1))
# Freeze the backbone: the first 62 named modules keep their pretrained
# weights; later modules (the classifier head) remain trainable.
for idx, (name, module) in enumerate(model.named_modules()):
    if(idx < 62):
        for param in module.parameters():
            param.requires_grad = False
    else:
        for param in module.parameters():
            param.requires_grad = True

# Dataset-wide grayscale mean/std used for single-channel normalization.
mean, std = get_params(data_path, resize_size)
normalize = transforms.Normalize(mean=[mean[0].item()],
                                    std=[std[0].item()])

transform_train = transforms.Compose([
        transforms.Resize((resize_size, resize_size)), # resize to a fixed square
        transforms.ColorJitter(0.2,0.2,0.2), # jitter brightness, contrast, saturation
        transforms.RandomRotation(2), # rotate by a random angle in [-2, 2] degrees
        transforms.RandomAffine(5), # random affine distortion (shear/skew-like)
        transforms.RandomCrop(resize_size, padding=2), # pad by 2 per side, then crop back to resize_size
        transforms.RandomHorizontalFlip(), # random left-right flip
        transforms.Grayscale(),
        transforms.ToTensor(),
        normalize
    ])
transform_test = transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.Grayscale(),
        transforms.ToTensor(),
        normalize
    ])

kwargs = {'num_workers': 16, 'pin_memory': True}
# Train and val read the same folder; the samplers below split it disjointly.
train_data = datasets.ImageFolder(data_path, transform_train)
val_data = datasets.ImageFolder(data_path,transform_test)

num_train = len(train_data)
indices = list(range(num_train))
split = int(np.floor(validation_ratio * num_train))

# Fixed random seed (same value across runs, so the split is identical).
np.random.seed(random_seed)
np.random.shuffle(indices)

# Split shuffled indices into train / validation index sets.
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)

train_loader = torch.utils.data.DataLoader(
    train_data, batch_size=256, sampler=train_sampler, #shuffle = True
    **kwargs)
val_loader = torch.utils.data.DataLoader(
    val_data, batch_size=256, sampler=valid_sampler, #shuffle = False
    **kwargs)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), 0.0001, weight_decay=0.0001)

if torch.cuda.is_available():
    torch.cuda.set_device(gpus[0])
    with torch.cuda.device(gpus[0]):
        model = model.cuda()
        criterion = criterion.cuda()
        model = torch.nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])

# Transfer pretrained weights, skipping the classifier head
# (the checkpoint was trained with a different class count).
checkpoint = torch.load(check_path)
pretrained_dict = checkpoint['state_dict']
new_model_dict = model.state_dict()
for k, v in pretrained_dict.items():
    if 'classifier' in k:
        continue
    new_model_dict.update({k : v})
model.load_state_dict(new_model_dict)
#model.load_state_dict(checkpoint['state_dict'], strict=False)
best_prec1 = checkpoint['best_prec1']

# Fine-tuning loop: train one epoch, validate, and checkpoint (best kept separately).
for epoch in range(epochs):
    train(model, train_loader, criterion, optimizer, epoch)
    prec1, prec, rec = validate(val_loader, model, criterion, epoch)
    is_best = prec1 >= best_prec1
    best_prec1 = max(prec1, best_prec1)
    checkpoint = save_checkpoint({
        'epoch': epoch + 1,
        'state_dict': model.state_dict(),
        'best_prec1': best_prec1,
    }, is_best)
    # Log the per-class precision/recall meters gathered during validation.
    for i in range(len(prec)):
        logger.info(' * Precision {prec.avg:.3f}'.format(prec=prec[i]))
        logger.info(' * recall {rec.avg:.3f}'.format(rec=rec[i]))
\ No newline at end of file
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
class FocalLoss(nn.Module):
    """Multi-class focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    loss = -alpha_t * (1 - p_t)^gamma * log(p_t), where p_t is the softmax
    probability of the true class. gamma=0 with alpha=None reduces to plain
    cross entropy (NLL of the softmax).

    Args:
        gamma: focusing exponent; larger values down-weight easy examples.
        alpha: per-class weights. A float a becomes [a, 1-a]; a list is used
            as-is; None disables class weighting.
        size_average: return the mean over elements if True, else the sum.
    """

    def __init__(self, gamma=0, alpha=None, size_average=True):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.alpha = alpha
        if isinstance(alpha, (float, int)):
            self.alpha = torch.Tensor([alpha, 1 - alpha])
        if isinstance(alpha, list):
            self.alpha = torch.Tensor(alpha)
        self.size_average = size_average

    def forward(self, input, target):
        # Flatten spatial dims so every location is scored independently.
        if input.dim() > 2:
            input = input.view(input.size(0), input.size(1), -1)  # N,C,H,W => N,C,H*W
            input = input.transpose(1, 2)                         # N,C,H*W => N,H*W,C
            input = input.contiguous().view(-1, input.size(2))    # N,H*W,C => N*H*W,C
        target = target.view(-1, 1)

        # log p_t of the true class; dim is explicit (implicit dim is deprecated).
        logpt = F.log_softmax(input, dim=1)
        logpt = logpt.gather(1, target)
        logpt = logpt.view(-1)
        # Detached probability of the true class (replaces deprecated Variable).
        pt = logpt.detach().exp()

        # BUGFIX: self.alpha.cuda() previously ran *before* the None check,
        # raising AttributeError for alpha=None and forcing CUDA on CPU inputs.
        # type_as moves alpha to input's dtype/device when needed.
        if self.alpha is not None:
            if self.alpha.type() != input.data.type():
                self.alpha = self.alpha.type_as(input.data)
            at = self.alpha.gather(0, target.data.view(-1))
            logpt = logpt * at

        loss = -1 * (1 - pt) ** self.gamma * logpt
        if self.size_average:
            return loss.mean()
        else:
            return loss.sum()
\ No newline at end of file
import torch.multiprocessing as mp
import torch
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import argparse
import numpy as np
from get_mean_std import get_params
from model import mobilenetv3
import parmap
# Image resize factor (square side length in pixels).
resize_size = 64
# Number of output classes.
class_num = 7
# Random seeds used for the training runs being compared.
seeds = [39396, 2798, 3843, 62034, 8817, 65014, 45385]
# Number of GPUs available on this machine (checkpoints assigned round-robin).
gpu = 4
# Saved checkpoints, one per seed, in the same order as `seeds`.
checkpoints = [
    "output/ErrorType/39396_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
    "output/ErrorType/2798_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
    "output/ErrorType/3843_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
    "output/ErrorType/62034_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
    "output/ErrorType/8817_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
    "output/ErrorType/65014_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar",
    "output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar"
]
class AverageMeter(object):
    """Tracks the most recent value and a count-weighted running mean."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: tuple of k values to report.

    Returns:
        List of 0-d tensors with top-k accuracy in percent, one per k.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # BUGFIX: reshape(-1) instead of view(-1) — slicing the transposed
        # tensor yields a non-contiguous view, and view() raises for k > 1.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def get_models():
    """Load one DataParallel-wrapped model per checkpoint, round-robin over GPUs.

    Each model is moved to GPU `index % gpu`, restored from its checkpoint,
    and placed in shared memory so worker processes can reuse it.
    """
    loaded = []
    for i, ckpt_path in enumerate(checkpoints):
        device = i % gpu
        state = torch.load(ckpt_path)
        net = mobilenetv3(n_class=class_num)
        torch.cuda.set_device(device)
        with torch.cuda.device(device):
            net = net.cuda()
            net = torch.nn.DataParallel(net, device_ids=[device], output_device=device)
            net.load_state_dict(state['state_dict'])
            net.share_memory()
            loaded.append(net)
    return loaded
def get_loader(path, resize_size):
    """Build a deterministic DataLoader over the ImageFolder at `path`.

    Args:
        path: dataset root directory (one subfolder per class).
        resize_size: square side length images are resized to.

    Returns:
        DataLoader (batch size 256, no shuffling) over grayscale images
        normalized with the dataset's own mean/std.
    """
    mean, std = get_params(path, resize_size)
    normalize = transforms.Normalize(mean=[mean[0].item()],
                                     std=[std[0].item()])
    transform = transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.Grayscale(),
        transforms.ToTensor(),
        normalize
    ])
    # BUGFIX: previously read the global `args.path`, silently ignoring the
    # `path` parameter (and raising NameError when `args` was undefined).
    dataset = datasets.ImageFolder(path, transform)
    kwargs = {'num_workers': 4, 'pin_memory': True}
    loader = torch.utils.data.DataLoader(dataset, batch_size=256, shuffle=False, **kwargs)
    return loader
def get_data(processnum, model, loader, return_dict):
    """Evaluate `model` on `loader`; store its top-1 accuracy under `processnum`.

    The CUDA device is chosen round-robin from the process index, matching
    the device assignment made in get_models().
    """
    with torch.no_grad():
        meter = AverageMeter()
        model.eval()
        device = processnum % gpu
        for batch in loader:
            images, labels = batch
            labels = labels.cuda(device)
            images = images.cuda(device)
            preds = model(images)
            top1_acc = accuracy(preds, labels, topk=(1,))[0]
            meter.update(top1_acc.item(), images.size(0))
        return_dict[processnum] = meter.avg
if __name__ == '__main__':
    # 'spawn' is required so each worker process gets its own CUDA context.
    mp.set_start_method('spawn')
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", required=True, help="path")
    args = parser.parse_args()
    # Manager dict collects each worker's top-1 accuracy, keyed by process index.
    manager = mp.Manager()
    return_dict = manager.dict()
    # get one loader
    loader = get_loader(args.path, resize_size)
    # multi model with other checkpoint.
    models = get_models()
    #loader is not array so can arise error
    # One evaluation process per model; GPUs are assigned round-robin in get_data.
    processes = []
    for i, model in enumerate(models):
        p = mp.Process(target=get_data, args=(i, model, loader, return_dict))
        p.start()
        processes.append(p)
    for p in processes: p.join()
    # Report per-seed accuracy and the variance of accuracy across seeds.
    for idx, seed in enumerate(seeds):
        print(f"process {idx}, seed {seed} : {return_dict[idx]}")
    print(f"total variance : {np.var(return_dict.values())}")
    #print(return_dict.values())
\ No newline at end of file
import os
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torchvision.models as models
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torchvision.utils import save_image
from PIL.ImageOps import grayscale
from PIL import Image
from torchvision.datasets import ImageFolder
class MyDataset(ImageFolder):
    """Thin ImageFolder wrapper used by get_params for mean/std computation."""

    def __init__(self, root, trainsform):
        # Parameter name kept as-is ("trainsform") for caller compatibility.
        super(MyDataset, self).__init__(root, trainsform)

    def __getitem__(self, index):
        sample, label = super(MyDataset, self).__getitem__(index)
        return sample, label
def get_params(path, resize_size):
    """Compute per-channel mean/std over all images under `path` (grayscale).

    Images are resized to (resize_size, resize_size), converted to grayscale
    tensors, and statistics are averaged per-image, then over the dataset.
    """
    pipeline = transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.Grayscale(),
        transforms.ToTensor()
    ])
    dataset = MyDataset(path, pipeline)
    loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=256,
        num_workers=8,
        shuffle=False
    )

    mean = 0.
    std = 0.
    nb_samples = 0.
    for data, _ in loader:
        count = data.size(0)
        # Flatten each image's spatial dims: (N, C, H*W).
        flat = data.view(count, data.size(1), -1)
        mean += flat.mean(2).sum(0)
        std += flat.std(2).sum(0)
        nb_samples += count
    mean /= nb_samples
    std /= nb_samples
    print(f"mean : {mean} , std : {std}")
    return mean, std
"""
my_transform = transforms.Compose([
transforms.Resize((64,64)),
transforms.ToTensor()
])
my_dataset = MyDataset("../data/Third_data/not_binary", my_transform)
loader = torch.utils.data.DataLoader(
my_dataset,
batch_size=256,
num_workers=8,
shuffle=False
)
mean = 0.
std = 0.
nb_samples = 0.
for i, (data, target) in enumerate(loader):
batch_samples = data.size(0)
data = data.view(batch_samples, data.size(1), -1)
mean += data.mean(2).sum(0)
std += data.std(2).sum(0)
nb_samples += batch_samples
mean /= nb_samples
std /= nb_samples
print(f"mean : {mean}, std : {std}")
"""
\ No newline at end of file
import os
import time
import sys
import torch.nn.functional as F
import numpy as np
import PIL
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import yaml
import cv2
from get_mean_std import get_params
# BUGFIX: os.path.dirname(__name__) used the module *name* (e.g. "__main__"),
# not a filesystem path, so nothing useful was appended. __file__ resolves the
# script's actual directory so the sibling `model` module imports from any cwd.
sys.path.append(os.path.join(os.path.dirname(__file__)))
from model import mobilenetv3
# Pre-create one output bucket per confidence-gap threshold.
if not os.path.exists("threshold"):
    os.mkdir("threshold")

thresholds = [.05, .1, .15, .2, .25, .3, .35, .4, .45, .5]
for threshold in thresholds:
    bucket = f"threshold/{threshold}"
    if not os.path.exists(bucket):
        os.mkdir(bucket)
def get_args_from_yaml(file='trainer/configs/Error_config.yml'):
    """Load a training configuration dict from a YAML file.

    Uses yaml.safe_load: yaml.load without an explicit Loader is deprecated
    (an error in PyYAML >= 6) and can execute arbitrary Python object tags
    from an untrusted config file.
    """
    with open(file) as f:
        conf = yaml.safe_load(f)
    return conf
class MyImageFolder(datasets.ImageFolder):
    """ImageFolder variant whose items also carry the (path, class) record."""

    def __getitem__(self, index):
        sample = super(MyImageFolder, self).__getitem__(index)
        # self.imgs[index] is the (file path, class index) tuple for this sample.
        return sample, self.imgs[index]
def main(args):
    """Entry point: run the extraction pipeline, then report completion."""
    run_model(args)
    print(f"[{args['id']}] done")
def run_model(args):
    """Build the validation loader and model from `args`, restore the
    checkpoint, then run extract_data to bucket low-confidence predictions.

    Args:
        args: config dict loaded from YAML (data paths, model shape, gpu ids,
            predict batch size / workers, checkpoint path).
    """
    resize_size = args['train']['size']
    gpus = args['gpu']
    # Dataset-wide grayscale mean/std, computed over the *train* folder.
    mean, std = get_params(args['data']['train'], resize_size)
    normalize = transforms.Normalize(mean=[mean[0].item()],
                                        std=[std[0].item()])
    normalize_factor = [mean, std]
    # data loader
    transform_test = transforms.Compose([
        transforms.Resize((resize_size,resize_size)),
        transforms.Grayscale(),
        transforms.ToTensor(),
        normalize
    ])
    kwargs = {'num_workers': args['predict']['worker'], 'pin_memory': True}
    # MyImageFolder also yields each sample's file path (needed by save_data).
    test_data = MyImageFolder(args['data']['val'], transform_test)
    val_loader = torch.utils.data.DataLoader(
        test_data, batch_size=args['predict']['batch-size'], shuffle=False,
        **kwargs)
    # load model
    model = mobilenetv3(n_class= args['model']['class'], blocknum= args['model']['blocks'])
    torch.cuda.set_device(gpus[0])
    with torch.cuda.device(gpus[0]):
        model = model.cuda()
        model = torch.nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])
    print("=> loading checkpoint '{}'".format(args['checkpoint']))
    checkpoint = torch.load(args['checkpoint'])
    model.load_state_dict(checkpoint['state_dict'])
    print("=> loaded checkpoint '{}' (epoch {})"
            .format(args['checkpoint'], checkpoint['epoch']))
    cudnn.benchmark = True
    extract_data(val_loader, model, normalize_factor, args)
def extract_data(val_loader, model, normalize_factor, args):
    """Run inference over the validation set and hand each batch to save_data.

    normalize_factor and args are accepted for interface compatibility but
    not used here.
    """
    with torch.no_grad():
        # Evaluation mode: freeze dropout / batch-norm statistics.
        model.eval()
        for (batch, meta) in val_loader:
            images, labels = batch
            paths, _ = meta
            labels = labels.cuda()
            images = images.cuda()
            logits = model(images)
            print("save data!")
            save_data(logits, labels, paths)
class AverageMeter(object):
    """Keeps the latest value and a count-weighted running average."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Start every statistic from zero.
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        # Fold in `val` observed `n` times and refresh the mean.
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, classes) score tensor.
        target: (batch,) ground-truth class indices.
        topk: tuple of k values to report.

    Returns:
        List of 0-d tensors with top-k accuracy in percent, one per k.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # BUGFIX: reshape(-1) instead of view(-1) — slicing the transposed
        # tensor yields a non-contiguous view, and view() raises for k > 1.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def save_data(output, target, path):
    """Bucket ambiguous predictions by confidence gap.

    For each sample, compute softmax probabilities rounded to 3 decimals and
    the gap between the two highest class probabilities; for every threshold
    the gap falls under, copy the image into that threshold's folder with the
    two candidate class indices embedded in the file name.
    """
    n_digits = 3
    prob = F.softmax(output, dim=1)
    # Round probabilities to n_digits decimal places.
    prob = torch.round(prob * 10**n_digits) / (10**n_digits)
    for idx, p in enumerate(prob):
        value = torch.topk(p, 2).values
        indice = torch.topk(p,2).indices
        value = value.tolist()
        indice = indice.tolist()
        # Gap between best and runner-up probability = prediction confidence.
        gap = abs(value[0]-value[1])
        for threshold in thresholds:
            if(gap < threshold):
                img = cv2.imread(path[idx])
                # NOTE(review): `filename` is computed but never used, and the
                # output name below ends with the literal "(unknown)" — it
                # likely was meant to embed the source file name; confirm
                # against version history before relying on these file names.
                filename = path[idx].split('/')[-1]
                cv2.imwrite(f'threshold/{threshold}/pred_{indice[0]}_{indice[1]}_(unknown)', img)
if __name__ == '__main__':
    # Load the "All" task configuration and tag this run as the
    # threshold-extraction job before launching the pipeline.
    args = get_args_from_yaml('configs/All_config.yml')
    args['config'] = 'All'
    args['id'] = 'threshold'
    main(args)
\ No newline at end of file
This diff could not be displayed because it is too large.
2020-03-31-19-11-26
use seed 963
use dataset : ../data/Fourth_data/All
{'task': 'All/train_2020-03-31-19-11-26_model=MobilenetV3-ep=3000-block=6', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': '../data/Fourth_data/All', 'val': '../data/Fourth_data/All', 'test': '../data/Fourth_data/All'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'weight': [2.0, 4.0, 1.0, 1.0, 3.0, 1.0, 1.0], 'resume': '', 'augment': True, 'size': 224, 'confidence': False}, 'predict': {'batch-size': 256, 'worker': 64, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-03-31-19-11-26'}
2020-04-03-17-46-05
use seed 635
use dataset : E:/code/detection/data/Fifth_data/All
{'task': 'All/train_2020-04-03-17-46-05_model=MobilenetV3-ep=4000-block=6-class=8', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/All', 'test': '../data/Fifth_data/All'}, 'train': {'epochs': 4000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar', 'augment': True, 'size': 224, 'confidence': False, 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 8}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-46-05'}
Number of model parameters: 462840
=> loading checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar'
=> loaded checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar' (epoch 3000)
Epoch: [0][0/26] Time 113.958 (113.958) Loss 0.0051 (0.0051) Prec@1 100.000 (100.000)
2020-04-08-19-38-36
use seed 283
use dataset : E:/code/detection/data/Fifth_data/All
{'task': 'All/train_2020-04-08-19-38-36_model=MobilenetV3-ep=3000-block=6-class=8', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/All', 'test': '../data/Fifth_data/All'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 224, 'confidence': False, 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 8}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-08-19-38-36'}
Fatal error in main loop
Traceback (most recent call last):
File "E:\code\detection\trainer\train.py", line 104, in main
run_model(args, q)
File "E:\code\detection\trainer\train.py", line 221, in run_model
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader))
TypeError: object of type 'DataLoader' has no len()
[train_2020-04-08-19-38-36] failed
This diff could not be displayed because it is too large.
2020-03-31-18-30-33
use seed 626
use dataset : ../data/Fourth_data/Error
{'task': 'Error/train_2020-03-31-18-30-33_model=MobilenetV3-ep=3000-block=4', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [0], 'data': {'train': '../data/Fourth_data/Error', 'val': '../data/Fourth_data/Error', 'test': '../data/Fourth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 2.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True}, 'optimizer': {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-03-31-18-30-33'}
Number of model parameters: 154706
Fatal error in main loop
Traceback (most recent call last):
File "E:\code\detection\trainer\train.py", line 91, in main
run_model(args, q)
File "E:\code\detection\trainer\train.py", line 264, in run_model
train(train_loader, model, criterion, optimizer, scheduler, epoch, args, q)
File "E:\code\detection\trainer\train.py", line 309, in train
for i, (input, target) in enumerate(train_loader):
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 279, in __iter__
return _MultiProcessingDataLoaderIter(self)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 719, in __init__
w.start()
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
BrokenPipeError: [Errno 32] Broken pipe
[train_2020-03-31-18-30-33] failed
2020-04-01-17-53-24
use seed 420
use dataset : ../data/Fifth_data/Error
{'task': 'Error/train_2020-04-01-17-53-24_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-17-53-24'}
Fatal error in main loop
Traceback (most recent call last):
File "E:\code\detection\trainer\train.py", line 91, in main
run_model(args, q)
File "E:\code\detection\trainer\train.py", line 125, in run_model
mean, std = get_params(args['data']['train'], resize_size)
File "E:\code\detection\trainer\get_mean_std.py", line 31, in get_params
my_dataset = MyDataset(path, my_transform)
File "E:\code\detection\trainer\get_mean_std.py", line 16, in __init__
super(MyDataset, self).__init__(root, trainsform)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torchvision\datasets\folder.py", line 209, in __init__
target_transform=target_transform)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torchvision\datasets\folder.py", line 83, in __init__
classes, class_to_idx = self._find_classes(root)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torchvision\datasets\folder.py", line 116, in _find_classes
classes = [d.name for d in os.scandir(dir) if d.is_dir()]
FileNotFoundError: [WinError 3] 지정된 경로를 찾을 수 없습니다: '../data/Fifth_data/Error'
[train_2020-04-01-17-53-24] failed
2020-04-01-18-16-36
use seed 95
use dataset : ../data/Fifth_data/Error
{'task': 'Error/train_2020-04-01-18-16-36_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-18-16-36'}
Number of model parameters: 154706
Epoch: [0][0/7] Time 43.018 (43.018) Loss 0.6986 (0.6986) Prec@1 34.473 (34.473) Precision 0.000 (0.000)
2020-04-01-20-18-29
use seed 997
use dataset : ../data/Fifth_data/Error
{'task': 'Error/train_2020-04-01-20-18-29_model=MobilenetV3-ep=3000-block=6-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-20-18-29'}
Fatal error in main loop
Traceback (most recent call last):
File "E:\code\detection\trainer\train.py", line 93, in main
run_model(args, q)
File "E:\code\detection\trainer\train.py", line 127, in run_model
mean, std = get_params(args['data']['train'], resize_size)
File "E:\code\detection\trainer\get_mean_std.py", line 43, in get_params
for i, (data, target) in enumerate(loader):
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 279, in __iter__
return _MultiProcessingDataLoaderIter(self)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 719, in __init__
w.start()
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 223, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\context.py", line 322, in _Popen
return Popen(process_obj)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\popen_spawn_win32.py", line 65, in __init__
reduction.dump(process_obj, to_child)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\multiprocessing\reduction.py", line 60, in dump
ForkingPickler(file, protocol).dump(obj)
BrokenPipeError: [Errno 32] Broken pipe
[train_2020-04-01-20-18-29] failed
2020-04-01-21-07-25
use seed 880
use dataset : ../data/Fifth_data/Error
{'task': 'Error/train_2020-04-01-21-07-25_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-21-07-25'}
2020-04-01-22-40-24
use seed 238
use dataset : ../data/Fifth_data/Error
{'task': 'Error/train_2020-04-01-22-40-24_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-22-40-24'}
Number of model parameters: 154706
Epoch: [0][0/7] Time 44.533 (44.533) Loss 0.6956 (0.6956) Prec@1 41.504 (41.504) Precision 0.000 (0.000)
2020-04-01-23-15-24
use seed 666
use dataset : ../data/Fifth_data/Error
{'task': 'Error/train_2020-04-01-23-15-24_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 1024, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 1024, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-23-15-24'}
2020-04-03-17-02-42
use seed 185
use dataset : E:/code/detection/data/Fifth_data/All
{'task': 'Error/train_2020-04-03-17-02-42_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': '3000', 'start-epoch': 0, 'batch-size': '256', 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': '256', 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': '0.001', 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-02-42'}
Fatal error in main loop
Traceback (most recent call last):
File "E:\code\detection\trainer\train.py", line 102, in main
run_model(args, q)
File "E:\code\detection\trainer\train.py", line 195, in run_model
**kwargs)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 219, in __init__
batch_sampler = BatchSampler(sampler, batch_size, drop_last)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\sampler.py", line 190, in __init__
"but got batch_size={}".format(batch_size))
ValueError: batch_size should be a positive integer value, but got batch_size=256
[train_2020-04-03-17-02-42] failed
2020-04-03-17-04-30
use seed 54
use dataset : E:/code/detection/data/Fifth_data/All
{'task': 'Error/train_2020-04-03-17-04-30_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': '3000', 'start-epoch': 0, 'batch-size': '256', 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': '256', 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': '0.001', 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-04-30'}
Fatal error in main loop
Traceback (most recent call last):
File "E:\code\detection\trainer\train.py", line 103, in main
run_model(args, q)
File "E:\code\detection\trainer\train.py", line 196, in run_model
**kwargs)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 219, in __init__
batch_sampler = BatchSampler(sampler, batch_size, drop_last)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\sampler.py", line 190, in __init__
"but got batch_size={}".format(batch_size))
ValueError: batch_size should be a positive integer value, but got batch_size=256
[train_2020-04-03-17-04-30] failed
2020-04-03-17-07-00
use seed 809
use dataset : E:/code/detection/data/Fifth_data/All
{'task': 'Error/train_2020-04-03-17-07-00_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': '3000', 'start-epoch': 0, 'batch-size': '256', 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': '256', 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': '0.001', 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-07-00'}
Fatal error in main loop
Traceback (most recent call last):
File "E:\code\detection\trainer\train.py", line 103, in main
run_model(args, q)
File "E:\code\detection\trainer\train.py", line 196, in run_model
**kwargs)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\dataloader.py", line 219, in __init__
batch_sampler = BatchSampler(sampler, batch_size, drop_last)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\utils\data\sampler.py", line 190, in __init__
"but got batch_size={}".format(batch_size))
ValueError: batch_size should be a positive integer value, but got batch_size=256
[train_2020-04-03-17-07-00] failed
2020-04-03-17-08-43
use seed 420
use dataset : E:/code/detection/data/Fifth_data/All
{'task': 'Error/train_2020-04-03-17-08-43_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-08-43'}
Number of model parameters: 154706
Fatal error in main loop
Traceback (most recent call last):
File "E:\code\detection\trainer\train.py", line 102, in main
run_model(args, q)
File "E:\code\detection\trainer\train.py", line 276, in run_model
train(train_loader, model, criterion, optimizer, scheduler, epoch, args, q)
File "E:\code\detection\trainer\train.py", line 327, in train
loss = criterion(output, target)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\modules\module.py", line 532, in __call__
result = self.forward(*input, **kwargs)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\modules\loss.py", line 916, in forward
ignore_index=self.ignore_index, reduction=self.reduction)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\functional.py", line 2021, in cross_entropy
return nll_loss(log_softmax(input, 1), target, weight, None, ignore_index, None, reduction)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\functional.py", line 1838, in nll_loss
ret = torch._C._nn.nll_loss(input, target, weight, _Reduction.get_enum(reduction), ignore_index)
IndexError: Target 5 is out of bounds.
[train_2020-04-03-17-08-43] failed
2020-04-03-17-09-59
use seed 420
use dataset : E:/code/detection/data/Fifth_data/Error
{'task': 'Error/train_2020-04-03-17-09-59_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-09-59'}
Number of model parameters: 154706
Epoch: [0][0/26] Time 32.853 (32.853) Loss 0.6907 (0.6907) Prec@1 56.641 (56.641) Precision 0.000 (0.000)
2020-04-03-17-09-59
use seed 420
use dataset : E:/code/detection/data/Fifth_data/Error
{'task': 'Error/train_2020-04-03-17-09-59_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-09-59'}
Number of model parameters: 154706
Epoch: [0][0/26] Time 32.853 (32.853) Loss 0.6907 (0.6907) Prec@1 56.641 (56.641) Precision 0.000 (0.000)
2020-04-03-17-22-28
use seed 845
use dataset : E:/code/detection/data/Fifth_data/Error
{'task': 'Error/train_2020-04-03-17-22-28_model=MobilenetV3-ep=1000-block=5-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/Error', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 1000, 'start-epoch': 0, 'batch-size': 128, 'worker': 16, 'resume': '', 'augment': True, 'size': 128, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 128, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.01, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 5, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-03-17-22-28'}
Number of model parameters: 400114
2020-04-08-19-37-53
use seed 41
use dataset : E:/code/detection/data/Fifth_data/All
{'task': 'Error/train_2020-04-08-19-37-53_model=MobilenetV3-ep=3000-block=4-class=2', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/Error/2456_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar', 'gpu': [1], 'data': {'train': 'E:/code/detection/data/Fifth_data/All', 'val': '../data/Fifth_data/Error', 'test': '../data/Fifth_data/Error'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0]}, 'predict': {'batch-size': 256, 'worker': 16, 'cam': False, 'cam-class': 'Error', 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.001, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 4, 'class': 2}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-08-19-37-53'}
This diff could not be displayed because it is too large.
2020-04-01-19-51-23
use seed 355
use dataset : ../data/Fifth_data/ErrorType
{'task': 'ErrorType/train_2020-04-01-19-51-23_model=MobilenetV3-ep=3000-block=6-class=7', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/ErrorType', 'val': '../data/Fifth_data/ErrorType', 'test': '../data/Fifth_data/ErrorType'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 4048, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}, 'predict': {'batch-size': 4048, 'worker': 16, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-19-51-23'}
Number of model parameters: 461559
2020-04-01-22-42-16
use seed 805
use dataset : ../data/Fifth_data/ErrorType
{'task': 'Type/train_2020-04-01-22-42-16_model=MobilenetV3-ep=3000-block=6-class=7', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [1], 'data': {'train': '../data/Fifth_data/ErrorType', 'val': '../data/Fifth_data/ErrorType', 'test': '../data/Fifth_data/ErrorType'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 4048, 'worker': 16, 'resume': '', 'augment': True, 'size': 64, 'confidence': False, 'weight': [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}, 'predict': {'batch-size': 4048, 'worker': 16, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'type': 'SGD', 'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-04-01-22-42-16'}
Number of model parameters: 461559
Number of model parameters: 161111
=> loading checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1304)
Test: [0/14] Time 4.107 (4.107) Loss 66.0858 (66.0858) Prec@1 12.891 (12.891)
Test: [1/14] Time 0.289 (2.198) Loss 64.3240 (65.2049) Prec@1 15.625 (14.258)
Test: [2/14] Time 0.362 (1.586) Loss 65.8078 (65.4059) Prec@1 13.672 (14.062)
Test: [3/14] Time 0.347 (1.276) Loss 68.0060 (66.0559) Prec@1 10.156 (13.086)
Test: [4/14] Time 0.259 (1.073) Loss 68.7825 (66.6012) Prec@1 10.156 (12.500)
Test: [5/14] Time 0.285 (0.941) Loss 67.9427 (66.8248) Prec@1 12.109 (12.435)
Test: [6/14] Time 0.346 (0.856) Loss 66.3187 (66.7525) Prec@1 11.328 (12.277)
Test: [7/14] Time 0.246 (0.780) Loss 65.7671 (66.6293) Prec@1 14.062 (12.500)
Test: [8/14] Time 0.316 (0.728) Loss 65.9718 (66.5563) Prec@1 12.109 (12.457)
Test: [9/14] Time 0.236 (0.679) Loss 64.3964 (66.3403) Prec@1 14.844 (12.695)
Test: [10/14] Time 0.291 (0.644) Loss 65.9720 (66.3068) Prec@1 11.328 (12.571)
Test: [11/14] Time 0.339 (0.619) Loss 66.6106 (66.3321) Prec@1 12.109 (12.533)
Test: [12/14] Time 0.243 (0.590) Loss 64.1202 (66.1620) Prec@1 14.062 (12.650)
Test: [13/14] Time 0.551 (0.587) Loss 63.1240 (66.0631) Prec@1 13.393 (12.674)
* Prec@1 12.674
Best accuracy: 90.11627924719522
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1304)
Test: [0/14] Time 3.525 (3.525) Loss 0.2238 (0.2238) Prec@1 91.797 (91.797)
Test: [1/14] Time 0.027 (1.776) Loss 0.1470 (0.1854) Prec@1 93.750 (92.773)
Test: [2/14] Time 0.025 (1.192) Loss 0.2031 (0.1913) Prec@1 93.359 (92.969)
Test: [3/14] Time 0.021 (0.900) Loss 0.1564 (0.1826) Prec@1 94.922 (93.457)
Test: [4/14] Time 0.025 (0.725) Loss 0.1664 (0.1793) Prec@1 93.359 (93.438)
Test: [5/14] Time 0.082 (0.617) Loss 0.3156 (0.2020) Prec@1 89.844 (92.839)
Test: [6/14] Time 0.028 (0.533) Loss 0.2379 (0.2072) Prec@1 92.188 (92.746)
Test: [7/14] Time 0.021 (0.469) Loss 0.1547 (0.2006) Prec@1 95.312 (93.066)
Test: [8/14] Time 0.032 (0.421) Loss 0.2915 (0.2107) Prec@1 90.625 (92.795)
Test: [9/14] Time 0.028 (0.381) Loss 0.2297 (0.2126) Prec@1 92.188 (92.734)
Test: [10/14] Time 0.023 (0.349) Loss 0.1834 (0.2099) Prec@1 94.922 (92.933)
Test: [11/14] Time 0.028 (0.322) Loss 0.1838 (0.2078) Prec@1 94.141 (93.034)
Test: [12/14] Time 0.027 (0.299) Loss 0.1991 (0.2071) Prec@1 92.188 (92.969)
Test: [13/14] Time 0.043 (0.281) Loss 0.0723 (0.2027) Prec@1 97.321 (93.110)
* Prec@1 93.110
Best accuracy: 93.11046504530796
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/14] Time 4.212 (4.212) Loss 52.0523 (52.0523) Prec@1 16.406 (16.406)
Test: [1/14] Time 0.064 (2.138) Loss 54.9665 (53.5094) Prec@1 12.891 (14.648)
Test: [2/14] Time 0.065 (1.447) Loss 52.2416 (53.0868) Prec@1 16.016 (15.104)
Test: [3/14] Time 0.062 (1.101) Loss 52.7707 (53.0078) Prec@1 14.453 (14.941)
Test: [4/14] Time 0.059 (0.892) Loss 53.9052 (53.1873) Prec@1 14.062 (14.766)
Test: [5/14] Time 0.075 (0.756) Loss 53.5808 (53.2528) Prec@1 10.938 (14.128)
Test: [6/14] Time 0.079 (0.659) Loss 54.1428 (53.3800) Prec@1 12.109 (13.839)
Test: [7/14] Time 0.064 (0.585) Loss 56.6207 (53.7851) Prec@1 10.156 (13.379)
Test: [8/14] Time 0.061 (0.527) Loss 54.8389 (53.9022) Prec@1 12.891 (13.325)
Test: [9/14] Time 0.072 (0.481) Loss 58.1075 (54.3227) Prec@1 9.766 (12.969)
Test: [10/14] Time 0.070 (0.444) Loss 57.8013 (54.6389) Prec@1 7.812 (12.500)
Test: [11/14] Time 0.076 (0.413) Loss 55.8743 (54.7419) Prec@1 10.938 (12.370)
Test: [12/14] Time 0.058 (0.386) Loss 51.2172 (54.4708) Prec@1 16.406 (12.680)
Test: [13/14] Time 0.060 (0.363) Loss 53.5995 (54.4424) Prec@1 12.500 (12.674)
* Prec@1 12.674
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 313, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'])
KeyError: 'cam-class'
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/14] Time 4.173 (4.173) Loss 0.2717 (0.2717) Prec@1 89.062 (89.062)
Test: [1/14] Time 0.023 (2.098) Loss 0.3759 (0.3238) Prec@1 89.062 (89.062)
Test: [2/14] Time 0.024 (1.407) Loss 0.2627 (0.3035) Prec@1 91.016 (89.714)
Test: [3/14] Time 0.235 (1.114) Loss 0.2139 (0.2811) Prec@1 92.969 (90.527)
Test: [4/14] Time 0.014 (0.894) Loss 0.2226 (0.2694) Prec@1 92.969 (91.016)
Test: [5/14] Time 0.013 (0.747) Loss 0.2957 (0.2738) Prec@1 90.625 (90.951)
Test: [6/14] Time 0.014 (0.642) Loss 0.3337 (0.2823) Prec@1 89.453 (90.737)
Test: [7/14] Time 0.014 (0.564) Loss 0.3640 (0.2925) Prec@1 89.062 (90.527)
Test: [8/14] Time 0.014 (0.503) Loss 0.2417 (0.2869) Prec@1 91.016 (90.582)
Test: [9/14] Time 0.013 (0.454) Loss 0.2122 (0.2794) Prec@1 91.406 (90.664)
Test: [10/14] Time 0.011 (0.414) Loss 0.1912 (0.2714) Prec@1 94.531 (91.016)
Test: [11/14] Time 0.013 (0.380) Loss 0.3103 (0.2746) Prec@1 91.016 (91.016)
Test: [12/14] Time 0.012 (0.352) Loss 0.2584 (0.2734) Prec@1 92.188 (91.106)
Test: [13/14] Time 0.039 (0.330) Loss 0.3947 (0.2773) Prec@1 89.286 (91.047)
* Prec@1 91.047
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 313, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'])
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 164, in make_grad_cam
image_paths.remove('eval_results/Error/error_case/cam')
ValueError: list.remove(x): x not in list
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/All/70933_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/14] Time 4.361 (4.361) Loss 0.3446 (0.3446) Prec@1 89.844 (89.844)
Test: [1/14] Time 0.020 (2.190) Loss 0.2700 (0.3073) Prec@1 91.016 (90.430)
Test: [2/14] Time 0.218 (1.533) Loss 0.3154 (0.3100) Prec@1 91.406 (90.755)
Test: [3/14] Time 0.018 (1.154) Loss 0.2044 (0.2836) Prec@1 90.625 (90.723)
Test: [4/14] Time 0.019 (0.927) Loss 0.3468 (0.2963) Prec@1 88.672 (90.312)
Test: [5/14] Time 0.019 (0.776) Loss 0.2885 (0.2950) Prec@1 89.453 (90.169)
Test: [6/14] Time 0.017 (0.667) Loss 0.2948 (0.2949) Prec@1 91.016 (90.290)
Test: [7/14] Time 0.016 (0.586) Loss 0.2294 (0.2867) Prec@1 92.969 (90.625)
Test: [8/14] Time 0.018 (0.523) Loss 0.3430 (0.2930) Prec@1 89.844 (90.538)
Test: [9/14] Time 0.016 (0.472) Loss 0.2377 (0.2875) Prec@1 92.188 (90.703)
Test: [10/14] Time 0.013 (0.430) Loss 0.3161 (0.2901) Prec@1 92.578 (90.874)
Test: [11/14] Time 0.014 (0.396) Loss 0.2830 (0.2895) Prec@1 89.453 (90.755)
Test: [12/14] Time 0.010 (0.366) Loss 0.1243 (0.2768) Prec@1 96.094 (91.166)
Test: [13/14] Time 0.051 (0.344) Loss 0.2939 (0.2773) Prec@1 87.500 (91.047)
* Prec@1 91.047
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 313, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'], args=args)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 220, in make_grad_cam
image_paths[j].split('/')[-1], arch, target_layer, classes[ids[j, i]]
IndexError: list index out of range
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 1647)
Test: [0/2] Time 4.475 (4.475) Loss 0.4215 (0.4215) Prec@1 92.578 (92.578)
Test: [1/2] Time 0.047 (2.261) Loss 0.9263 (0.5382) Prec@1 88.312 (91.592)
* Prec@1 91.592
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 313, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'], args=args)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 221, in make_grad_cam
image_paths[j].split('/')[-1], arch, target_layer, classes[ids[j, i]]
IndexError: list index out of range
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633)
Test: [0/2] Time 5.110 (5.110) Loss 0.3949 (0.3949) Prec@1 92.969 (92.969)
Test: [1/2] Time 0.040 (2.575) Loss 0.3570 (0.3862) Prec@1 96.104 (93.694)
* Prec@1 93.694
Best accuracy: 93.6936941519156
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633)
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 288, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
File "test.py", line 359, in save_error_case
cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[pred[idx][0]]}={prob[idx][0]}_{class_arr[pred[idx][1]]}={prob[idx][1]}_real.bmp" ,img)
IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633)
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 288, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
File "test.py", line 361, in save_error_case
cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[pred[idx][0]]}={prob[idx][0]}_{class_arr[pred[idx][1]]}={prob[idx][1]}_real.bmp" ,img)
IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633)
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 288, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
File "test.py", line 364, in save_error_case
cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[pred[idx][0]]}={prob[idx][0]}_{class_arr[pred[idx][1]]}={prob[idx][1]}_real.bmp" ,img)
IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633)
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 288, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
File "test.py", line 363, in save_error_case
cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[pred[idx][0]]}={prob[idx][0]}_{class_arr[pred[idx][1]]}={prob[idx][1]}_real.bmp" ,img)
IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633)
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 288, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
File "test.py", line 336, in save_error_case
prob, pred = output.topk(maxk, 2, True, True)
IndexError: Dimension out of range (expected to be in range of [-2, 1], but got 2)
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633)
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 288, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
File "test.py", line 341, in save_error_case
pred = pred.view(batch_size)
RuntimeError: shape '[256]' is invalid for input of size 512
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633)
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 288, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
File "test.py", line 341, in save_error_case
pred = pred.view(batch_size, -1)
RuntimeError: invalid argument 2: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Call .contiguous() before .view(). at /pytorch/aten/src/THC/generic/THCTensor.cpp:209
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1633)
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 288, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
File "test.py", line 343, in save_error_case
pred = pred.view(batch_size, -1)
RuntimeError: invalid argument 2: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Call .contiguous() before .view(). at /pytorch/aten/src/THC/generic/THCTensor.cpp:209
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109)
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 288, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
File "test.py", line 344, in save_error_case
pred = pred.view(batch_size, -1)
RuntimeError: invalid argument 2: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Call .contiguous() before .view(). at /pytorch/aten/src/THC/generic/THCTensor.cpp:209
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109)
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 288, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
File "test.py", line 344, in save_error_case
pred = pred.view(batch_size, 2)
RuntimeError: invalid argument 2: view size is not compatible with input tensor's size and stride (at least one dimension spans across two contiguous subspaces). Call .contiguous() before .view(). at /pytorch/aten/src/THC/generic/THCTensor.cpp:209
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109)
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 288, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
File "test.py", line 344, in save_error_case
correct = correct.view(batch_size)
RuntimeError: shape '[256]' is invalid for input of size 512
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109)
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 288, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
File "test.py", line 357, in save_error_case
cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[pred[idx][0]]}={prob[idx][0]}_{class_arr[pred[idx][1]]}={prob[idx][1]}_real.bmp" ,img)
IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109)
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 288, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
File "test.py", line 358, in save_error_case
cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[pred[idx][0]]}={prob[idx][0]}_{class_arr[pred[idx][1]]}={prob[idx][1]}_real.bmp" ,img)
IndexError: invalid index of a 0-dim tensor. Use tensor.item() to convert a 0-dim tensor to a Python number
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109)
Test: [0/2] Time 4.322 (4.322) Loss 0.5515 (0.5515) Prec@1 92.969 (92.969)
Test: [1/2] Time 0.048 (2.185) Loss 0.2176 (0.4743) Prec@1 97.403 (93.994)
* Prec@1 93.994
Best accuracy: 93.9939935586832
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109)
Test: [0/2] Time 4.349 (4.349) Loss 0.5171 (0.5171) Prec@1 93.359 (93.359)
Test: [1/2] Time 0.053 (2.201) Loss 0.3320 (0.4743) Prec@1 96.104 (93.994)
* Prec@1 93.994
Best accuracy: 93.99399422310493
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109)
Test: [0/2] Time 4.404 (4.404) Loss 0.5640 (0.5640) Prec@1 92.578 (92.578)
Test: [1/2] Time 0.056 (2.230) Loss 0.1761 (0.4743) Prec@1 98.701 (93.994)
* Prec@1 93.994
Best accuracy: 93.99399312337239
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109)
Test: [0/2] Time 3.512 (3.512) Loss 0.4511 (0.4511) Prec@1 93.750 (93.750)
Test: [1/2] Time 0.045 (1.779) Loss 0.5512 (0.4743) Prec@1 94.805 (93.994)
* Prec@1 93.994
Best accuracy: 93.99399312337239
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109)
Test: [0/2] Time 3.608 (3.608) Loss 0.5495 (0.5495) Prec@1 93.750 (93.750)
Test: [1/2] Time 0.045 (1.827) Loss 0.2242 (0.4743) Prec@1 94.805 (93.994)
* Prec@1 93.994
Best accuracy: 93.99399312337239
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/63674_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2109)
Test: [0/2] Time 3.403 (3.403) Loss 0.4165 (0.4165) Prec@1 94.531 (94.531)
Test: [1/2] Time 0.045 (1.724) Loss 0.6662 (0.4743) Prec@1 92.208 (93.994)
* Prec@1 93.994
Best accuracy: 93.99399445221589
[eval] done
Number of model parameters: 161111
Number of model parameters: 161111
=> loading checkpoint 'output/All/40418_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/All/40418_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 399)
Test: [0/2] Time 3.386 (3.386) Loss 0.3162 (0.3162) Prec@1 94.141 (94.141)
Test: [1/2] Time 0.045 (1.716) Loss 0.3989 (0.3353) Prec@1 92.208 (93.694)
* Prec@1 93.694
Best accuracy: 93.6936941519156
[eval] done
Number of model parameters: 461559
=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382)
Test: [0/2] Time 5.649 (5.649) Loss 0.2832 (0.2832) Prec@1 95.703 (95.703)
Test: [1/2] Time 0.201 (2.925) Loss 0.5106 (0.3358) Prec@1 94.805 (95.495)
* Prec@1 95.495
Best accuracy: 95.49549572460644
[eval] done
Number of model parameters: 461559
=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382)
Test: [0/2] Time 6.114 (6.114) Loss 0.3426 (0.3426) Prec@1 95.703 (95.703)
Test: [1/2] Time 0.141 (3.127) Loss 0.3133 (0.3358) Prec@1 94.805 (95.495)
* Prec@1 95.495
Best accuracy: 95.49549572460644
[eval] done
Number of model parameters: 461559
=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382)
* Prec@1 95.495
* Prec@1 95.495
Best accuracy: 95.49549572460644
[validate_2020-03-26-17-26-14] done
[validate_2020-03-26-17-26-14] done
/home/yh9468/detection/data/Fourth_data/demo Test dir submitted
start test using path : /home/yh9468/detection/data/Fourth_data/demo
Test start
loading checkpoint...
checkpoint already loaded!
start test
data path directory is /home/yh9468/detection/data/Fourth_data/demo
finish test
/home/yh9468/detection/data/Fourth_data/demo/demoset/1-7.bmp test file submitted
start test using path : ('/home/yh9468/detection/data/Fourth_data/demo/demoset/1-7.bmp', 'All Files(*)')
Test start
loading checkpoint...
checkpoint already loaded!
start test
finish test
Number of model parameters: 461559
=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382)
* Prec@1 95.495
* Prec@1 95.495
Best accuracy: 95.49549572460644
[validate_2020-03-26-17-48-44] done
[validate_2020-03-26-17-48-44] done
set error
using default checkpoint
Number of model parameters: 461559
=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382)
Fatal error in main loop
Traceback (most recent call last):
File "E:\code\detection\trainer\test.py", line 270, in main
run_model(args, q)
File "E:\code\detection\trainer\test.py", line 360, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "E:\code\detection\trainer\test.py", line 394, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=True)
File "E:\code\detection\trainer\test.py", line 455, in save_error_case
os.mkdir(f"eval_results/{args['task']}")
FileNotFoundError: [WinError 3] 지정된 경로를 찾을 수 없습니다: 'eval_results/All'
[validate_2020-03-31-18-34-56] failed
Number of model parameters: 461559
=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382)
Test: [0/2] Time 118.781 (118.781) Loss 0.3068 (0.3068) Prec@1 95.703 (95.703)
Test: [1/2] Time 3.661 (61.221) Loss 0.4321 (0.3358) Prec@1 94.805 (95.495)
* Prec@1 95.495
* Prec@1 95.495
Best accuracy: 95.49549572460644
[validate_2020-03-31-19-08-47] done
[validate_2020-03-31-19-08-47] done
train start
2020-03-31-19-11-26
use seed 963
use dataset : ../data/Fourth_data/All
{'task': 'All/train_2020-03-31-19-11-26_model=MobilenetV3-ep=3000-block=6', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': '../data/Fourth_data/All', 'val': '../data/Fourth_data/All', 'test': '../data/Fourth_data/All'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'weight': [2.0, 4.0, 1.0, 1.0, 3.0, 1.0, 1.0], 'resume': '', 'augment': True, 'size': 224, 'confidence': False}, 'predict': {'batch-size': 256, 'worker': 64, 'cam': False, 'normalize': True, 'save': False}, 'optimizer': {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-03-31-19-11-26'}
using default checkpoint
Number of model parameters: 462840
=> loading checkpoint 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
=> loaded checkpoint 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1617)
Test: [0/3] Time 31.288 (31.288) Loss 0.2660 (0.2660) Prec@1 95.703 (95.703)
Test: [1/3] Time 7.587 (19.437) Loss 0.3209 (0.2934) Prec@1 95.312 (95.508)
Test: [2/3] Time 6.625 (15.167) Loss 0.1835 (0.2602) Prec@1 96.396 (95.777)
* Prec@1 95.777
* Prec@1 95.777
Best accuracy: 95.77656669512757
[validate_2020-04-01-23-00-04] done
[validate_2020-04-01-23-00-04] done
set error
Test를 수행하기 위해 데이터를 입력해 주세요.
Test를 수행하기 위해 데이터를 입력해 주세요.
using user's checkpoint E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Number of model parameters: 462840
=> loading checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar'
=> loaded checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar' (epoch 3000)
Test: [0/3] Time 32.591 (32.591) Loss 0.1575 (0.1575) Prec@1 94.531 (94.531)
Test: [1/3] Time 8.179 (20.385) Loss 0.2475 (0.2025) Prec@1 93.750 (94.141)
Test: [2/3] Time 7.374 (16.048) Loss 0.4568 (0.2794) Prec@1 94.595 (94.278)
* Prec@1 94.278
* Prec@1 94.278
Best accuracy: 96.04904700754774
[validate_2020-04-03-17-39-50] done
[validate_2020-04-03-17-39-50] done
E:/code/detection/data/Fifth_data/All/Empty/1-5.bmp test file submitted
Test start
start test using path : E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
using user's checkpoint E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
loading checkpoint...
checkpoint already loaded!
start test
single_file_test() missing 1 required positional argument: 'q'
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
Number of model parameters: 154706
=> loading checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/1] Time 0.609 (0.609) Loss 0.0259 (0.0259) Prec@1 99.408 (99.408)
* Prec@1 99.408
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Fatal error in main loop
Traceback (most recent call last):
File "eval_binary_model.py", line 68, in main
run_model(args)
File "eval_binary_model.py", line 147, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args)
File "eval_binary_model.py", line 173, in validate
for i, (input, target), (path, _) in enumerate(val_loader):
ValueError: not enough values to unpack (expected 3, got 2)
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/1] Time 0.615 (0.615) Loss 0.0259 (0.0259) Prec@1 99.408 (99.408)
* Prec@1 99.408
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/1] Time 0.667 (0.667) Loss 0.0259 (0.0259) Prec@1 99.408 (99.408)
* Prec@1 99.408
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1085)
Test: [0/1] Time 0.683 (0.683) Loss 17.0075 (17.0075) Prec@1 14.201 (14.201)
* Prec@1 14.201
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1085)
Test: [0/1] Time 0.698 (0.698) Loss 6.7597 (6.7597) Prec@1 31.953 (31.953)
* Prec@1 31.953
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "eval_binary_model.py", line 56, in main
run_model(args)
File "eval_binary_model.py", line 135, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args)
File "eval_binary_model.py", line 196, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 164, in make_grad_cam
images = torch.stack(images).to(device)
RuntimeError: expected a non-empty list of Tensors
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1718)
Test: [0/1] Time 0.674 (0.674) Loss 6.5805 (6.5805) Prec@1 32.544 (32.544)
* Prec@1 32.544
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "eval_binary_model.py", line 56, in main
run_model(args)
File "eval_binary_model.py", line 135, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args)
File "eval_binary_model.py", line 196, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 166, in make_grad_cam
images = torch.stack(images).to(device)
RuntimeError: expected a non-empty list of Tensors
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1760)
Test: [0/1] Time 0.709 (0.709) Loss 5.7071 (5.7071) Prec@1 31.953 (31.953)
* Prec@1 31.953
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "eval_binary_model.py", line 56, in main
run_model(args)
File "eval_binary_model.py", line 135, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args)
File "eval_binary_model.py", line 196, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 165, in make_grad_cam
images = torch.stack(images).to(device)
RuntimeError: expected a non-empty list of Tensors
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1843)
Test: [0/1] Time 0.696 (0.696) Loss 7.9185 (7.9185) Prec@1 31.361 (31.361)
* Prec@1 31.361
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "eval_binary_model.py", line 56, in main
run_model(args)
File "eval_binary_model.py", line 135, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args)
File "eval_binary_model.py", line 196, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 163, in make_grad_cam
images, raw_images, image_paths = load_images(image_paths, normalize_factor)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 56, in load_images
image, raw_image = preprocess(image_path, normalize_factor)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 80, in preprocess
raw_image = cv2.resize(raw_image, (64,) * 2)
cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgproc/src/resize.cpp:3784: error: (-215:Assertion failed) !ssize.empty() in function 'resize'
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1843)
Test: [0/1] Time 0.685 (0.685) Loss 7.9185 (7.9185) Prec@1 31.361 (31.361)
* Prec@1 31.361
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "eval_binary_model.py", line 56, in main
run_model(args)
File "eval_binary_model.py", line 135, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args)
File "eval_binary_model.py", line 196, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 164, in make_grad_cam
images, raw_images, image_paths = load_images(image_paths, normalize_factor)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 57, in load_images
image, raw_image = preprocess(image_path, normalize_factor)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 81, in preprocess
raw_image = cv2.resize(raw_image, (64,) * 2)
cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgproc/src/resize.cpp:3784: error: (-215:Assertion failed) !ssize.empty() in function 'resize'
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1843)
Test: [0/1] Time 0.666 (0.666) Loss 7.9185 (7.9185) Prec@1 31.361 (31.361)
* Prec@1 31.361
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2117)
Test: [0/1] Time 0.676 (0.676) Loss 0.0074 (0.0074) Prec@1 100.000 (100.000)
* Prec@1 100.000
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "eval_binary_model.py", line 56, in main
run_model(args)
File "eval_binary_model.py", line 135, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args)
File "eval_binary_model.py", line 196, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 166, in make_grad_cam
images = torch.stack(images).to(device)
RuntimeError: expected a non-empty list of Tensors
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2195)
Test: [0/6] Time 0.754 (0.754) Loss 0.0064 (0.0064) Prec@1 100.000 (100.000)
* Prec@1 99.606
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
Number of model parameters: 154706
=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2995)
Test: [0/1] Time 0.688 (0.688) Loss 0.0074 (0.0074) Prec@1 100.000 (100.000)
* Prec@1 100.000
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2995)
Test: [0/1] Time 0.694 (0.694) Loss 0.0074 (0.0074) Prec@1 100.000 (100.000)
* Prec@1 100.000
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 171, in main
run_model(args, q)
File "test.py", line 249, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 310, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None, cam_class=args['predict']['cam-class'])
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 168, in make_grad_cam
images = torch.stack(images).to(device)
RuntimeError: expected a non-empty list of Tensors
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2995)
Test: [0/7] Time 0.772 (0.772) Loss 0.0048 (0.0048) Prec@1 100.000 (100.000)
* Prec@1 99.882
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/26260_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2995)
Test: [0/7] Time 0.767 (0.767) Loss 0.0024 (0.0024) Prec@1 100.000 (100.000)
* Prec@1 99.882
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888)
Test: [0/22] Time 1.470 (1.470) Loss 11.2787 (11.2787) Prec@1 51.172 (51.172)
Test: [10/22] Time 0.152 (0.286) Loss 11.5936 (11.1885) Prec@1 51.953 (52.273)
Test: [20/22] Time 0.139 (0.259) Loss 11.3118 (11.2861) Prec@1 49.609 (52.269)
* Prec@1 52.311
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 171, in main
run_model(args, q)
File "test.py", line 249, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 310, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None, cam_class=args['predict']['cam-class'])
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 197, in make_grad_cam
_ = gcam.forward(images)
File "/home/yh9468/detection/trainer/visualize/grad_cam_utils.py", line 31, in forward
self.logits = self.model(image)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/parallel/data_parallel.py", line 150, in forward
return self.module(*inputs[0], **kwargs[0])
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "/home/yh9468/detection/trainer/model.py", line 170, in forward
x = self.features(x)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/container.py", line 92, in forward
input = module(input)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "/home/yh9468/detection/trainer/model.py", line 31, in forward
return x * F.relu6(x + 3., inplace=self.inplace) / 6.
RuntimeError: CUDA out of memory. Tried to allocate 26.00 MiB (GPU 0; 10.92 GiB total capacity; 9.45 GiB already allocated; 6.56 MiB free; 52.02 MiB cached)
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888)
Test: [0/7] Time 1.322 (1.322) Loss 16.0815 (16.0815) Prec@1 38.672 (38.672)
* Prec@1 31.915
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888)
Test: [0/7] Time 2.113 (2.113) Loss 17.8164 (17.8164) Prec@1 30.859 (30.859)
* Prec@1 31.915
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888)
Test: [0/7] Time 1.314 (1.314) Loss 5.2641 (5.2641) Prec@1 60.156 (60.156)
* Prec@1 59.338
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888)
Test: [0/7] Time 1.537 (1.537) Loss 2.6861 (2.6861) Prec@1 74.609 (74.609)
* Prec@1 80.142
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888)
Test: [0/7] Time 2.039 (2.039) Loss 0.0138 (0.0138) Prec@1 99.219 (99.219)
* Prec@1 99.764
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/30297_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2888)
Test: [0/7] Time 1.235 (1.235) Loss 0.0215 (0.0215) Prec@1 99.609 (99.609)
Test: [1/7] Time 0.158 (0.697) Loss 0.0061 (0.0138) Prec@1 100.000 (99.805)
Test: [2/7] Time 0.156 (0.517) Loss 0.0033 (0.0103) Prec@1 100.000 (99.870)
Test: [3/7] Time 0.156 (0.427) Loss 0.0044 (0.0088) Prec@1 100.000 (99.902)
Test: [4/7] Time 0.157 (0.373) Loss 0.0091 (0.0089) Prec@1 99.609 (99.844)
Test: [5/7] Time 0.213 (0.346) Loss 0.0090 (0.0089) Prec@1 99.609 (99.805)
Test: [6/7] Time 0.179 (0.322) Loss 0.0216 (0.0101) Prec@1 99.359 (99.764)
* Prec@1 99.764
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.948 (0.948) Loss 0.0047 (0.0047) Prec@1 99.609 (99.609)
Test: [1/7] Time 0.152 (0.550) Loss 0.0033 (0.0040) Prec@1 99.609 (99.609)
Test: [2/7] Time 0.153 (0.418) Loss 0.0287 (0.0122) Prec@1 99.219 (99.479)
Test: [3/7] Time 0.152 (0.351) Loss 0.0358 (0.0181) Prec@1 99.609 (99.512)
Test: [4/7] Time 0.151 (0.311) Loss 0.0010 (0.0147) Prec@1 100.000 (99.609)
Test: [5/7] Time 0.152 (0.285) Loss 0.0004 (0.0123) Prec@1 100.000 (99.674)
Test: [6/7] Time 0.167 (0.268) Loss 0.0029 (0.0114) Prec@1 100.000 (99.704)
* Prec@1 99.704
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.754 (0.754) Loss 0.0067 (0.0067) Prec@1 99.609 (99.609)
Test: [1/7] Time 0.021 (0.387) Loss 0.0374 (0.0221) Prec@1 99.609 (99.609)
Test: [2/7] Time 0.016 (0.264) Loss 0.0039 (0.0160) Prec@1 99.609 (99.609)
Test: [3/7] Time 0.016 (0.202) Loss 0.0048 (0.0132) Prec@1 99.609 (99.609)
Test: [4/7] Time 0.016 (0.165) Loss 0.0219 (0.0149) Prec@1 99.609 (99.609)
Test: [5/7] Time 0.015 (0.140) Loss 0.0004 (0.0125) Prec@1 100.000 (99.674)
Test: [6/7] Time 0.076 (0.131) Loss 0.0009 (0.0114) Prec@1 100.000 (99.704)
* Prec@1 99.704
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.749 (0.749) Loss 0.0039 (0.0039) Prec@1 99.609 (99.609)
Test: [1/7] Time 0.019 (0.384) Loss 0.0214 (0.0126) Prec@1 99.609 (99.609)
Test: [2/7] Time 0.016 (0.261) Loss 0.0062 (0.0105) Prec@1 99.609 (99.609)
Test: [3/7] Time 0.015 (0.199) Loss 0.0009 (0.0081) Prec@1 100.000 (99.707)
Test: [4/7] Time 0.015 (0.162) Loss 0.0016 (0.0068) Prec@1 100.000 (99.766)
Test: [5/7] Time 0.016 (0.138) Loss 0.0397 (0.0123) Prec@1 99.219 (99.674)
Test: [6/7] Time 0.074 (0.129) Loss 0.0031 (0.0114) Prec@1 100.000 (99.704)
* Prec@1 99.704
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.736 (0.736) Loss 0.0660 (0.0660) Prec@1 98.438 (98.438)
Test: [1/7] Time 0.017 (0.376) Loss 0.0008 (0.0334) Prec@1 100.000 (99.219)
Test: [2/7] Time 0.016 (0.256) Loss 0.0045 (0.0238) Prec@1 99.609 (99.349)
Test: [3/7] Time 0.016 (0.196) Loss 0.0006 (0.0180) Prec@1 100.000 (99.512)
Test: [4/7] Time 0.016 (0.160) Loss 0.0007 (0.0145) Prec@1 100.000 (99.609)
Test: [5/7] Time 0.015 (0.136) Loss 0.0018 (0.0124) Prec@1 100.000 (99.674)
Test: [6/7] Time 0.097 (0.130) Loss 0.0019 (0.0114) Prec@1 100.000 (99.704)
* Prec@1 99.704
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 171, in main
run_model(args, q)
File "test.py", line 249, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 307, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'])
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 174, in make_grad_cam
cv2.imwrite(f"eval_results/Error/error_case/check/{image_path.split('/')[-1]}", raw_image)
cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgcodecs/src/loadsave.cpp:661: error: (-2:Unspecified error) could not find a writer for the specified extension in function 'imwrite_'
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.717 (0.717) Loss 0.0004 (0.0004) Prec@1 100.000 (100.000)
Test: [1/7] Time 0.026 (0.372) Loss 0.0005 (0.0004) Prec@1 100.000 (100.000)
Test: [2/7] Time 0.016 (0.253) Loss 0.0010 (0.0006) Prec@1 100.000 (100.000)
Test: [3/7] Time 0.023 (0.196) Loss 0.0079 (0.0025) Prec@1 99.219 (99.805)
Test: [4/7] Time 0.016 (0.160) Loss 0.0232 (0.0066) Prec@1 99.609 (99.766)
Test: [5/7] Time 0.016 (0.136) Loss 0.0366 (0.0116) Prec@1 99.609 (99.740)
Test: [6/7] Time 0.080 (0.128) Loss 0.0098 (0.0114) Prec@1 99.359 (99.704)
* Prec@1 99.704
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 171, in main
run_model(args, q)
File "test.py", line 249, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 307, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'])
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 167, in make_grad_cam
images, raw_images, image_paths = load_images(image_paths, normalize_factor)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 58, in load_images
image, raw_image = preprocess(image_path, normalize_factor)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 82, in preprocess
raw_image = cv2.resize(raw_image, (128,) * 2)
cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgproc/src/resize.cpp:3784: error: (-215:Assertion failed) !ssize.empty() in function 'resize'
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.762 (0.762) Loss 0.0016 (0.0016) Prec@1 100.000 (100.000)
Test: [1/7] Time 0.026 (0.394) Loss 0.0004 (0.0010) Prec@1 100.000 (100.000)
Test: [2/7] Time 0.023 (0.270) Loss 0.0092 (0.0038) Prec@1 99.219 (99.740)
Test: [3/7] Time 0.016 (0.207) Loss 0.0048 (0.0040) Prec@1 99.609 (99.707)
Test: [4/7] Time 0.016 (0.169) Loss 0.0223 (0.0077) Prec@1 99.609 (99.688)
Test: [5/7] Time 0.021 (0.144) Loss 0.0369 (0.0125) Prec@1 99.609 (99.674)
Test: [6/7] Time 0.081 (0.135) Loss 0.0006 (0.0114) Prec@1 100.000 (99.704)
* Prec@1 99.704
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 171, in main
run_model(args, q)
File "test.py", line 249, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 307, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'])
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 175, in make_grad_cam
cv2.imwrite(f"eval_results/Error/error_case/check/{image_path.split('/')[-1]}", raw_image)
cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgcodecs/src/loadsave.cpp:661: error: (-2:Unspecified error) could not find a writer for the specified extension in function 'imwrite_'
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.734 (0.734) Loss 0.0066 (0.0066) Prec@1 99.219 (99.219)
Test: [1/7] Time 0.019 (0.376) Loss 0.0215 (0.0140) Prec@1 99.609 (99.414)
Test: [2/7] Time 0.015 (0.256) Loss 0.0006 (0.0095) Prec@1 100.000 (99.609)
Test: [3/7] Time 0.015 (0.196) Loss 0.0031 (0.0079) Prec@1 100.000 (99.707)
Test: [4/7] Time 0.016 (0.160) Loss 0.0362 (0.0136) Prec@1 99.609 (99.688)
Test: [5/7] Time 0.016 (0.136) Loss 0.0062 (0.0124) Prec@1 99.609 (99.674)
Test: [6/7] Time 0.082 (0.128) Loss 0.0024 (0.0114) Prec@1 100.000 (99.704)
* Prec@1 99.704
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 171, in main
run_model(args, q)
File "test.py", line 249, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 307, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'])
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 168, in make_grad_cam
images, raw_images, image_paths = load_images(image_paths, normalize_factor)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 57, in load_images
del image_path[i]
TypeError: 'str' object doesn't support item deletion
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.737 (0.737) Loss 0.0036 (0.0036) Prec@1 100.000 (100.000)
Test: [1/7] Time 0.033 (0.385) Loss 0.0066 (0.0051) Prec@1 99.609 (99.805)
Test: [2/7] Time 0.016 (0.262) Loss 0.0033 (0.0045) Prec@1 99.609 (99.740)
Test: [3/7] Time 0.015 (0.200) Loss 0.0010 (0.0036) Prec@1 100.000 (99.805)
Test: [4/7] Time 0.015 (0.163) Loss 0.0009 (0.0031) Prec@1 100.000 (99.844)
Test: [5/7] Time 0.015 (0.139) Loss 0.0008 (0.0027) Prec@1 100.000 (99.870)
Test: [6/7] Time 0.070 (0.129) Loss 0.0976 (0.0114) Prec@1 98.077 (99.704)
* Prec@1 99.704
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.751 (0.751) Loss 0.0568 (0.0568) Prec@1 99.219 (99.219)
Test: [1/7] Time 0.017 (0.384) Loss 0.0015 (0.0292) Prec@1 100.000 (99.609)
Test: [2/7] Time 0.016 (0.261) Loss 0.0007 (0.0197) Prec@1 100.000 (99.740)
Test: [3/7] Time 0.016 (0.200) Loss 0.0038 (0.0157) Prec@1 99.609 (99.707)
Test: [4/7] Time 0.015 (0.163) Loss 0.0016 (0.0129) Prec@1 100.000 (99.766)
Test: [5/7] Time 0.017 (0.139) Loss 0.0111 (0.0126) Prec@1 99.219 (99.674)
Test: [6/7] Time 0.104 (0.134) Loss 0.0002 (0.0114) Prec@1 100.000 (99.704)
* Prec@1 99.704
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.756 (0.756) Loss 0.0072 (0.0072) Prec@1 99.219 (99.219)
Test: [1/7] Time 0.018 (0.387) Loss 0.0063 (0.0068) Prec@1 99.609 (99.414)
Test: [2/7] Time 0.017 (0.264) Loss 0.0576 (0.0237) Prec@1 99.219 (99.349)
Test: [3/7] Time 0.015 (0.202) Loss 0.0002 (0.0179) Prec@1 100.000 (99.512)
Test: [4/7] Time 0.015 (0.164) Loss 0.0006 (0.0144) Prec@1 100.000 (99.609)
Test: [5/7] Time 0.015 (0.139) Loss 0.0027 (0.0125) Prec@1 100.000 (99.674)
Test: [6/7] Time 0.088 (0.132) Loss 0.0014 (0.0114) Prec@1 100.000 (99.704)
* Prec@1 99.704
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.719 (0.719) Loss 0.0020 (0.0020) Prec@1 100.000 (100.000)
Test: [1/7] Time 0.035 (0.377) Loss 0.0047 (0.0033) Prec@1 99.609 (99.805)
Test: [2/7] Time 0.016 (0.256) Loss 0.0006 (0.0024) Prec@1 100.000 (99.870)
Test: [3/7] Time 0.016 (0.196) Loss 0.0359 (0.0108) Prec@1 99.609 (99.805)
Test: [4/7] Time 0.015 (0.160) Loss 0.0012 (0.0089) Prec@1 100.000 (99.844)
Test: [5/7] Time 0.016 (0.136) Loss 0.0071 (0.0086) Prec@1 99.609 (99.805)
Test: [6/7] Time 0.093 (0.130) Loss 0.0396 (0.0114) Prec@1 98.718 (99.704)
* Prec@1 99.704
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.746 (0.746) Loss 0.0003 (0.0003) Prec@1 100.000 (100.000)
Test: [1/7] Time 0.026 (0.386) Loss 0.0004 (0.0004) Prec@1 100.000 (100.000)
Test: [2/7] Time 0.032 (0.268) Loss 0.0264 (0.0091) Prec@1 99.219 (99.740)
Test: [3/7] Time 0.017 (0.205) Loss 0.0414 (0.0171) Prec@1 99.219 (99.609)
Test: [4/7] Time 0.015 (0.167) Loss 0.0030 (0.0143) Prec@1 100.000 (99.688)
Test: [5/7] Time 0.020 (0.142) Loss 0.0038 (0.0126) Prec@1 99.609 (99.674)
Test: [6/7] Time 0.079 (0.133) Loss 0.0005 (0.0114) Prec@1 100.000 (99.704)
* Prec@1 99.704
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.781 (0.781) Loss 0.0231 (0.0231) Prec@1 99.609 (99.609)
Test: [1/7] Time 0.019 (0.400) Loss 0.0072 (0.0152) Prec@1 99.219 (99.414)
Test: [2/7] Time 0.016 (0.272) Loss 0.0004 (0.0102) Prec@1 100.000 (99.609)
Test: [3/7] Time 0.015 (0.208) Loss 0.0014 (0.0080) Prec@1 100.000 (99.707)
Test: [4/7] Time 0.016 (0.169) Loss 0.0062 (0.0077) Prec@1 99.609 (99.688)
Test: [5/7] Time 0.019 (0.144) Loss 0.0005 (0.0065) Prec@1 100.000 (99.740)
Test: [6/7] Time 0.095 (0.137) Loss 0.0605 (0.0114) Prec@1 99.359 (99.704)
* Prec@1 99.704
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.758 (0.758) Loss 0.0105 (0.0105) Prec@1 99.219 (99.219)
Test: [1/7] Time 0.017 (0.388) Loss 0.0006 (0.0056) Prec@1 100.000 (99.609)
Test: [2/7] Time 0.017 (0.264) Loss 0.0216 (0.0109) Prec@1 99.609 (99.609)
Test: [3/7] Time 0.016 (0.202) Loss 0.0360 (0.0172) Prec@1 99.609 (99.609)
Test: [4/7] Time 0.016 (0.165) Loss 0.0015 (0.0140) Prec@1 100.000 (99.688)
Test: [5/7] Time 0.016 (0.140) Loss 0.0048 (0.0125) Prec@1 99.609 (99.674)
Test: [6/7] Time 0.077 (0.131) Loss 0.0011 (0.0114) Prec@1 100.000 (99.704)
* Prec@1 99.704
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 171, in main
run_model(args, q)
File "test.py", line 249, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 307, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'])
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 163, in make_grad_cam
image_paths.remove('cam')
ValueError: list.remove(x): x not in list
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.812 (0.812) Loss 0.0411 (0.0411) Prec@1 99.219 (99.219)
Test: [1/7] Time 0.020 (0.416) Loss 0.0036 (0.0223) Prec@1 99.609 (99.414)
Test: [2/7] Time 0.016 (0.283) Loss 0.0016 (0.0154) Prec@1 100.000 (99.609)
Test: [3/7] Time 0.016 (0.216) Loss 0.0066 (0.0132) Prec@1 99.609 (99.609)
Test: [4/7] Time 0.019 (0.177) Loss 0.0003 (0.0106) Prec@1 100.000 (99.688)
Test: [5/7] Time 0.017 (0.150) Loss 0.0005 (0.0089) Prec@1 100.000 (99.740)
Test: [6/7] Time 0.088 (0.141) Loss 0.0362 (0.0114) Prec@1 99.359 (99.704)
* Prec@1 99.704
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 171, in main
run_model(args, q)
File "test.py", line 249, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 307, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'])
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 165, in make_grad_cam
images, raw_images, _ = load_images(image_paths, normalize_factor)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 54, in load_images
image, raw_image = preprocess(image_path, normalize_factor)
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 78, in preprocess
raw_image = cv2.resize(raw_image, (128,) * 2)
cv2.error: OpenCV(4.0.0) /io/opencv/modules/imgproc/src/resize.cpp:3784: error: (-215:Assertion failed) !ssize.empty() in function 'resize'
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.748 (0.748) Loss 0.0066 (0.0066) Prec@1 99.609 (99.609)
Test: [1/7] Time 0.018 (0.383) Loss 0.0003 (0.0035) Prec@1 100.000 (99.805)
Test: [2/7] Time 0.016 (0.261) Loss 0.0050 (0.0040) Prec@1 99.609 (99.740)
Test: [3/7] Time 0.017 (0.200) Loss 0.0385 (0.0126) Prec@1 99.219 (99.609)
Test: [4/7] Time 0.015 (0.163) Loss 0.0019 (0.0105) Prec@1 100.000 (99.688)
Test: [5/7] Time 0.016 (0.138) Loss 0.0230 (0.0126) Prec@1 99.609 (99.674)
Test: [6/7] Time 0.081 (0.130) Loss 0.0005 (0.0114) Prec@1 100.000 (99.704)
* Prec@1 99.704
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/85219_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/7] Time 0.748 (0.748) Loss 0.0002 (0.0002) Prec@1 100.000 (100.000)
Test: [1/7] Time 0.036 (0.392) Loss 0.0034 (0.0018) Prec@1 99.609 (99.805)
Test: [2/7] Time 0.016 (0.266) Loss 0.0005 (0.0014) Prec@1 100.000 (99.870)
Test: [3/7] Time 0.017 (0.204) Loss 0.0267 (0.0077) Prec@1 99.219 (99.707)
Test: [4/7] Time 0.016 (0.167) Loss 0.0077 (0.0077) Prec@1 99.609 (99.688)
Test: [5/7] Time 0.016 (0.141) Loss 0.0367 (0.0125) Prec@1 99.609 (99.674)
Test: [6/7] Time 0.091 (0.134) Loss 0.0006 (0.0114) Prec@1 100.000 (99.704)
* Prec@1 99.704
Creating CAM
Best accuracy: 100.0
[eval] done
Number of model parameters: 318810
=> loading checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/best_model.pth.tar'
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 171, in main
run_model(args, q)
File "test.py", line 235, in run_model
checkpoint = torch.load(args['checkpoint'])
File "/usr/local/lib/python3.6/dist-packages/torch/serialization.py", line 382, in load
f = open(f, 'rb')
FileNotFoundError: [Errno 2] No such file or directory: 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/best_model.pth.tar'
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2348)
Test: [0/2] Time 1.785 (1.785) Loss 6.5358 (6.5358) Prec@1 67.969 (67.969)
Test: [1/2] Time 0.281 (1.033) Loss 6.3798 (6.4966) Prec@1 68.605 (68.129)
* Prec@1 68.129
Creating CAM
Best accuracy: 95.3216364899574
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/25039_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/25039_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 229)
Test: [0/2] Time 1.178 (1.178) Loss 0.3317 (0.3317) Prec@1 89.062 (89.062)
Test: [1/2] Time 0.077 (0.627) Loss 0.3194 (0.3286) Prec@1 87.209 (88.596)
* Prec@1 88.596
Creating CAM
Best accuracy: 88.59649033574333
[eval] done
Number of model parameters: 154706
Number of model parameters: 154706
=> loading checkpoint 'output/Error/89458_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/89458_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 128)
Test: [0/2] Time 1.903 (1.903) Loss 22.3839 (22.3839) Prec@1 66.016 (66.016)
Test: [1/2] Time 0.066 (0.985) Loss 16.9735 (21.0234) Prec@1 74.419 (68.129)
* Prec@1 68.129
Creating CAM
Best accuracy: 94.7368419267978
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/89458_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/89458_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 302)
Test: [0/2] Time 1.783 (1.783) Loss 0.1690 (0.1690) Prec@1 94.922 (94.922)
Test: [1/2] Time 0.047 (0.915) Loss 0.1477 (0.1637) Prec@1 96.512 (95.322)
* Prec@1 95.322
Creating CAM
Best accuracy: 95.32163724843521
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/89458_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/89458_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 2021)
Test: [0/14] Time 1.783 (1.783) Loss 0.1130 (0.1130) Prec@1 94.922 (94.922)
Test: [1/14] Time 0.127 (0.955) Loss 0.1829 (0.1479) Prec@1 91.016 (92.969)
Test: [2/14] Time 0.023 (0.644) Loss 0.1316 (0.1425) Prec@1 94.531 (93.490)
Test: [3/14] Time 0.022 (0.489) Loss 0.1273 (0.1387) Prec@1 95.312 (93.945)
Test: [4/14] Time 0.036 (0.398) Loss 0.1953 (0.1500) Prec@1 90.234 (93.203)
Test: [5/14] Time 0.029 (0.337) Loss 0.1562 (0.1510) Prec@1 93.359 (93.229)
Test: [6/14] Time 0.023 (0.292) Loss 0.1281 (0.1478) Prec@1 94.922 (93.471)
Test: [7/14] Time 0.032 (0.259) Loss 0.2017 (0.1545) Prec@1 91.797 (93.262)
Test: [8/14] Time 0.024 (0.233) Loss 0.1373 (0.1526) Prec@1 94.531 (93.403)
Test: [9/14] Time 0.029 (0.213) Loss 0.1353 (0.1509) Prec@1 94.531 (93.516)
Test: [10/14] Time 0.018 (0.195) Loss 0.1153 (0.1476) Prec@1 96.094 (93.750)
Test: [11/14] Time 0.046 (0.183) Loss 0.1625 (0.1489) Prec@1 92.578 (93.652)
Test: [12/14] Time 0.024 (0.170) Loss 0.1584 (0.1496) Prec@1 94.141 (93.690)
Test: [13/14] Time 0.098 (0.165) Loss 0.1581 (0.1498) Prec@1 92.473 (93.657)
* Prec@1 93.657
Creating CAM
Best accuracy: 95.90643123158237
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/39852_model=Efficientnet-ep=3000-block=4/checkpoint.pth.tar'
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 174, in main
run_model(args, q)
File "test.py", line 241, in run_model
model.load_state_dict(checkpoint['state_dict'])
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 777, in load_state_dict
self.__class__.__name__, "\n\t".join(error_msgs)))
RuntimeError: Error(s) in loading state_dict for DataParallel:
Missing key(s) in state_dict: "module.features.0.0.weight", "module.features.0.1.weight", "module.features.0.1.bias", "module.features.0.1.running_mean", "module.features.0.1.running_var", "module.features.1.conv.0.weight", "module.features.1.conv.1.weight", "module.features.1.conv.1.bias", "module.features.1.conv.1.running_mean", "module.features.1.conv.1.running_var", "module.features.1.conv.3.weight", "module.features.1.conv.4.weight", "module.features.1.conv.4.bias", "module.features.1.conv.4.running_mean", "module.features.1.conv.4.running_var", "module.features.1.conv.5.fc.0.weight", "module.features.1.conv.5.fc.2.weight", "module.features.1.conv.7.weight", "module.features.1.conv.8.weight", "module.features.1.conv.8.bias", "module.features.1.conv.8.running_mean", "module.features.1.conv.8.running_var", "module.features.2.conv.0.weight", "module.features.2.conv.1.weight", "module.features.2.conv.1.bias", "module.features.2.conv.1.running_mean", "module.features.2.conv.1.running_var", "module.features.2.conv.3.weight", "module.features.2.conv.4.weight", "module.features.2.conv.4.bias", "module.features.2.conv.4.running_mean", "module.features.2.conv.4.running_var", "module.features.2.conv.7.weight", "module.features.2.conv.8.weight", "module.features.2.conv.8.bias", "module.features.2.conv.8.running_mean", "module.features.2.conv.8.running_var", "module.features.3.conv.0.weight", "module.features.3.conv.1.weight", "module.features.3.conv.1.bias", "module.features.3.conv.1.running_mean", "module.features.3.conv.1.running_var", "module.features.3.conv.3.weight", "module.features.3.conv.4.weight", "module.features.3.conv.4.bias", "module.features.3.conv.4.running_mean", "module.features.3.conv.4.running_var", "module.features.3.conv.7.weight", "module.features.3.conv.8.weight", "module.features.3.conv.8.bias", "module.features.3.conv.8.running_mean", "module.features.3.conv.8.running_var", "module.features.4.conv.0.weight", "module.features.4.conv.1.weight", 
"module.features.4.conv.1.bias", "module.features.4.conv.1.running_mean", "module.features.4.conv.1.running_var", "module.features.4.conv.3.weight", "module.features.4.conv.4.weight", "module.features.4.conv.4.bias", "module.features.4.conv.4.running_mean", "module.features.4.conv.4.running_var", "module.features.4.conv.5.fc.0.weight", "module.features.4.conv.5.fc.2.weight", "module.features.4.conv.7.weight", "module.features.4.conv.8.weight", "module.features.4.conv.8.bias", "module.features.4.conv.8.running_mean", "module.features.4.conv.8.running_var", "module.features.5.0.weight", "module.features.5.1.weight", "module.features.5.1.bias", "module.features.5.1.running_mean", "module.features.5.1.running_var", "module.features.7.weight", "module.features.7.bias", "module.classifier.1.weight", "module.classifier.1.bias".
Unexpected key(s) in state_dict: "module._conv_stem.weight", "module._bn0.weight", "module._bn0.bias", "module._bn0.running_mean", "module._bn0.running_var", "module._bn0.num_batches_tracked", "module._blocks.0._depthwise_conv.weight", "module._blocks.0._bn1.weight", "module._blocks.0._bn1.bias", "module._blocks.0._bn1.running_mean", "module._blocks.0._bn1.running_var", "module._blocks.0._bn1.num_batches_tracked", "module._blocks.0._se_reduce.weight", "module._blocks.0._se_reduce.bias", "module._blocks.0._se_expand.weight", "module._blocks.0._se_expand.bias", "module._blocks.0._project_conv.weight", "module._blocks.0._bn2.weight", "module._blocks.0._bn2.bias", "module._blocks.0._bn2.running_mean", "module._blocks.0._bn2.running_var", "module._blocks.0._bn2.num_batches_tracked", "module._blocks.1._expand_conv.weight", "module._blocks.1._bn0.weight", "module._blocks.1._bn0.bias", "module._blocks.1._bn0.running_mean", "module._blocks.1._bn0.running_var", "module._blocks.1._bn0.num_batches_tracked", "module._blocks.1._depthwise_conv.weight", "module._blocks.1._bn1.weight", "module._blocks.1._bn1.bias", "module._blocks.1._bn1.running_mean", "module._blocks.1._bn1.running_var", "module._blocks.1._bn1.num_batches_tracked", "module._blocks.1._se_reduce.weight", "module._blocks.1._se_reduce.bias", "module._blocks.1._se_expand.weight", "module._blocks.1._se_expand.bias", "module._blocks.1._project_conv.weight", "module._blocks.1._bn2.weight", "module._blocks.1._bn2.bias", "module._blocks.1._bn2.running_mean", "module._blocks.1._bn2.running_var", "module._blocks.1._bn2.num_batches_tracked", "module._blocks.2._expand_conv.weight", "module._blocks.2._bn0.weight", "module._blocks.2._bn0.bias", "module._blocks.2._bn0.running_mean", "module._blocks.2._bn0.running_var", "module._blocks.2._bn0.num_batches_tracked", "module._blocks.2._depthwise_conv.weight", "module._blocks.2._bn1.weight", "module._blocks.2._bn1.bias", "module._blocks.2._bn1.running_mean", 
"module._blocks.2._bn1.running_var", "module._blocks.2._bn1.num_batches_tracked", "module._blocks.2._se_reduce.weight", "module._blocks.2._se_reduce.bias", "module._blocks.2._se_expand.weight", "module._blocks.2._se_expand.bias", "module._blocks.2._project_conv.weight", "module._blocks.2._bn2.weight", "module._blocks.2._bn2.bias", "module._blocks.2._bn2.running_mean", "module._blocks.2._bn2.running_var", "module._blocks.2._bn2.num_batches_tracked", "module._blocks.3._expand_conv.weight", "module._blocks.3._bn0.weight", "module._blocks.3._bn0.bias", "module._blocks.3._bn0.running_mean", "module._blocks.3._bn0.running_var", "module._blocks.3._bn0.num_batches_tracked", "module._blocks.3._depthwise_conv.weight", "module._blocks.3._bn1.weight", "module._blocks.3._bn1.bias", "module._blocks.3._bn1.running_mean", "module._blocks.3._bn1.running_var", "module._blocks.3._bn1.num_batches_tracked", "module._blocks.3._se_reduce.weight", "module._blocks.3._se_reduce.bias", "module._blocks.3._se_expand.weight", "module._blocks.3._se_expand.bias", "module._blocks.3._project_conv.weight", "module._blocks.3._bn2.weight", "module._blocks.3._bn2.bias", "module._blocks.3._bn2.running_mean", "module._blocks.3._bn2.running_var", "module._blocks.3._bn2.num_batches_tracked", "module._blocks.4._expand_conv.weight", "module._blocks.4._bn0.weight", "module._blocks.4._bn0.bias", "module._blocks.4._bn0.running_mean", "module._blocks.4._bn0.running_var", "module._blocks.4._bn0.num_batches_tracked", "module._blocks.4._depthwise_conv.weight", "module._blocks.4._bn1.weight", "module._blocks.4._bn1.bias", "module._blocks.4._bn1.running_mean", "module._blocks.4._bn1.running_var", "module._blocks.4._bn1.num_batches_tracked", "module._blocks.4._se_reduce.weight", "module._blocks.4._se_reduce.bias", "module._blocks.4._se_expand.weight", "module._blocks.4._se_expand.bias", "module._blocks.4._project_conv.weight", "module._blocks.4._bn2.weight", "module._blocks.4._bn2.bias", 
"module._blocks.4._bn2.running_mean", "module._blocks.4._bn2.running_var", "module._blocks.4._bn2.num_batches_tracked", "module._blocks.5._expand_conv.weight", "module._blocks.5._bn0.weight", "module._blocks.5._bn0.bias", "module._blocks.5._bn0.running_mean", "module._blocks.5._bn0.running_var", "module._blocks.5._bn0.num_batches_tracked", "module._blocks.5._depthwise_conv.weight", "module._blocks.5._bn1.weight", "module._blocks.5._bn1.bias", "module._blocks.5._bn1.running_mean", "module._blocks.5._bn1.running_var", "module._blocks.5._bn1.num_batches_tracked", "module._blocks.5._se_reduce.weight", "module._blocks.5._se_reduce.bias", "module._blocks.5._se_expand.weight", "module._blocks.5._se_expand.bias", "module._blocks.5._project_conv.weight", "module._blocks.5._bn2.weight", "module._blocks.5._bn2.bias", "module._blocks.5._bn2.running_mean", "module._blocks.5._bn2.running_var", "module._blocks.5._bn2.num_batches_tracked", "module._blocks.6._expand_conv.weight", "module._blocks.6._bn0.weight", "module._blocks.6._bn0.bias", "module._blocks.6._bn0.running_mean", "module._blocks.6._bn0.running_var", "module._blocks.6._bn0.num_batches_tracked", "module._blocks.6._depthwise_conv.weight", "module._blocks.6._bn1.weight", "module._blocks.6._bn1.bias", "module._blocks.6._bn1.running_mean", "module._blocks.6._bn1.running_var", "module._blocks.6._bn1.num_batches_tracked", "module._blocks.6._se_reduce.weight", "module._blocks.6._se_reduce.bias", "module._blocks.6._se_expand.weight", "module._blocks.6._se_expand.bias", "module._blocks.6._project_conv.weight", "module._blocks.6._bn2.weight", "module._blocks.6._bn2.bias", "module._blocks.6._bn2.running_mean", "module._blocks.6._bn2.running_var", "module._blocks.6._bn2.num_batches_tracked", "module._blocks.7._expand_conv.weight", "module._blocks.7._bn0.weight", "module._blocks.7._bn0.bias", "module._blocks.7._bn0.running_mean", "module._blocks.7._bn0.running_var", "module._blocks.7._bn0.num_batches_tracked", 
"module._blocks.7._depthwise_conv.weight", "module._blocks.7._bn1.weight", "module._blocks.7._bn1.bias", "module._blocks.7._bn1.running_mean", "module._blocks.7._bn1.running_var", "module._blocks.7._bn1.num_batches_tracked", "module._blocks.7._se_reduce.weight", "module._blocks.7._se_reduce.bias", "module._blocks.7._se_expand.weight", "module._blocks.7._se_expand.bias", "module._blocks.7._project_conv.weight", "module._blocks.7._bn2.weight", "module._blocks.7._bn2.bias", "module._blocks.7._bn2.running_mean", "module._blocks.7._bn2.running_var", "module._blocks.7._bn2.num_batches_tracked", "module._blocks.8._expand_conv.weight", "module._blocks.8._bn0.weight", "module._blocks.8._bn0.bias", "module._blocks.8._bn0.running_mean", "module._blocks.8._bn0.running_var", "module._blocks.8._bn0.num_batches_tracked", "module._blocks.8._depthwise_conv.weight", "module._blocks.8._bn1.weight", "module._blocks.8._bn1.bias", "module._blocks.8._bn1.running_mean", "module._blocks.8._bn1.running_var", "module._blocks.8._bn1.num_batches_tracked", "module._blocks.8._se_reduce.weight", "module._blocks.8._se_reduce.bias", "module._blocks.8._se_expand.weight", "module._blocks.8._se_expand.bias", "module._blocks.8._project_conv.weight", "module._blocks.8._bn2.weight", "module._blocks.8._bn2.bias", "module._blocks.8._bn2.running_mean", "module._blocks.8._bn2.running_var", "module._blocks.8._bn2.num_batches_tracked", "module._blocks.9._expand_conv.weight", "module._blocks.9._bn0.weight", "module._blocks.9._bn0.bias", "module._blocks.9._bn0.running_mean", "module._blocks.9._bn0.running_var", "module._blocks.9._bn0.num_batches_tracked", "module._blocks.9._depthwise_conv.weight", "module._blocks.9._bn1.weight", "module._blocks.9._bn1.bias", "module._blocks.9._bn1.running_mean", "module._blocks.9._bn1.running_var", "module._blocks.9._bn1.num_batches_tracked", "module._blocks.9._se_reduce.weight", "module._blocks.9._se_reduce.bias", "module._blocks.9._se_expand.weight", 
"module._blocks.9._se_expand.bias", "module._blocks.9._project_conv.weight", "module._blocks.9._bn2.weight", "module._blocks.9._bn2.bias", "module._blocks.9._bn2.running_mean", "module._blocks.9._bn2.running_var", "module._blocks.9._bn2.num_batches_tracked", "module._blocks.10._expand_conv.weight", "module._blocks.10._bn0.weight", "module._blocks.10._bn0.bias", "module._blocks.10._bn0.running_mean", "module._blocks.10._bn0.running_var", "module._blocks.10._bn0.num_batches_tracked", "module._blocks.10._depthwise_conv.weight", "module._blocks.10._bn1.weight", "module._blocks.10._bn1.bias", "module._blocks.10._bn1.running_mean", "module._blocks.10._bn1.running_var", "module._blocks.10._bn1.num_batches_tracked", "module._blocks.10._se_reduce.weight", "module._blocks.10._se_reduce.bias", "module._blocks.10._se_expand.weight", "module._blocks.10._se_expand.bias", "module._blocks.10._project_conv.weight", "module._blocks.10._bn2.weight", "module._blocks.10._bn2.bias", "module._blocks.10._bn2.running_mean", "module._blocks.10._bn2.running_var", "module._blocks.10._bn2.num_batches_tracked", "module._blocks.11._expand_conv.weight", "module._blocks.11._bn0.weight", "module._blocks.11._bn0.bias", "module._blocks.11._bn0.running_mean", "module._blocks.11._bn0.running_var", "module._blocks.11._bn0.num_batches_tracked", "module._blocks.11._depthwise_conv.weight", "module._blocks.11._bn1.weight", "module._blocks.11._bn1.bias", "module._blocks.11._bn1.running_mean", "module._blocks.11._bn1.running_var", "module._blocks.11._bn1.num_batches_tracked", "module._blocks.11._se_reduce.weight", "module._blocks.11._se_reduce.bias", "module._blocks.11._se_expand.weight", "module._blocks.11._se_expand.bias", "module._blocks.11._project_conv.weight", "module._blocks.11._bn2.weight", "module._blocks.11._bn2.bias", "module._blocks.11._bn2.running_mean", "module._blocks.11._bn2.running_var", "module._blocks.11._bn2.num_batches_tracked", "module._blocks.12._expand_conv.weight", 
"module._blocks.12._bn0.weight", "module._blocks.12._bn0.bias", "module._blocks.12._bn0.running_mean", "module._blocks.12._bn0.running_var", "module._blocks.12._bn0.num_batches_tracked", "module._blocks.12._depthwise_conv.weight", "module._blocks.12._bn1.weight", "module._blocks.12._bn1.bias", "module._blocks.12._bn1.running_mean", "module._blocks.12._bn1.running_var", "module._blocks.12._bn1.num_batches_tracked", "module._blocks.12._se_reduce.weight", "module._blocks.12._se_reduce.bias", "module._blocks.12._se_expand.weight", "module._blocks.12._se_expand.bias", "module._blocks.12._project_conv.weight", "module._blocks.12._bn2.weight", "module._blocks.12._bn2.bias", "module._blocks.12._bn2.running_mean", "module._blocks.12._bn2.running_var", "module._blocks.12._bn2.num_batches_tracked", "module._blocks.13._expand_conv.weight", "module._blocks.13._bn0.weight", "module._blocks.13._bn0.bias", "module._blocks.13._bn0.running_mean", "module._blocks.13._bn0.running_var", "module._blocks.13._bn0.num_batches_tracked", "module._blocks.13._depthwise_conv.weight", "module._blocks.13._bn1.weight", "module._blocks.13._bn1.bias", "module._blocks.13._bn1.running_mean", "module._blocks.13._bn1.running_var", "module._blocks.13._bn1.num_batches_tracked", "module._blocks.13._se_reduce.weight", "module._blocks.13._se_reduce.bias", "module._blocks.13._se_expand.weight", "module._blocks.13._se_expand.bias", "module._blocks.13._project_conv.weight", "module._blocks.13._bn2.weight", "module._blocks.13._bn2.bias", "module._blocks.13._bn2.running_mean", "module._blocks.13._bn2.running_var", "module._blocks.13._bn2.num_batches_tracked", "module._blocks.14._expand_conv.weight", "module._blocks.14._bn0.weight", "module._blocks.14._bn0.bias", "module._blocks.14._bn0.running_mean", "module._blocks.14._bn0.running_var", "module._blocks.14._bn0.num_batches_tracked", "module._blocks.14._depthwise_conv.weight", "module._blocks.14._bn1.weight", "module._blocks.14._bn1.bias", 
"module._blocks.14._bn1.running_mean", "module._blocks.14._bn1.running_var", "module._blocks.14._bn1.num_batches_tracked", "module._blocks.14._se_reduce.weight", "module._blocks.14._se_reduce.bias", "module._blocks.14._se_expand.weight", "module._blocks.14._se_expand.bias", "module._blocks.14._project_conv.weight", "module._blocks.14._bn2.weight", "module._blocks.14._bn2.bias", "module._blocks.14._bn2.running_mean", "module._blocks.14._bn2.running_var", "module._blocks.14._bn2.num_batches_tracked", "module._blocks.15._expand_conv.weight", "module._blocks.15._bn0.weight", "module._blocks.15._bn0.bias", "module._blocks.15._bn0.running_mean", "module._blocks.15._bn0.running_var", "module._blocks.15._bn0.num_batches_tracked", "module._blocks.15._depthwise_conv.weight", "module._blocks.15._bn1.weight", "module._blocks.15._bn1.bias", "module._blocks.15._bn1.running_mean", "module._blocks.15._bn1.running_var", "module._blocks.15._bn1.num_batches_tracked", "module._blocks.15._se_reduce.weight", "module._blocks.15._se_reduce.bias", "module._blocks.15._se_expand.weight", "module._blocks.15._se_expand.bias", "module._blocks.15._project_conv.weight", "module._blocks.15._bn2.weight", "module._blocks.15._bn2.bias", "module._blocks.15._bn2.running_mean", "module._blocks.15._bn2.running_var", "module._blocks.15._bn2.num_batches_tracked", "module._conv_head.weight", "module._bn1.weight", "module._bn1.bias", "module._bn1.running_mean", "module._bn1.running_var", "module._bn1.num_batches_tracked", "module._fc.weight", "module._fc.bias".
[eval] failed
Number of model parameters: 4009534
=> loading checkpoint 'output/Error/39852_model=Efficientnet-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/39852_model=Efficientnet-ep=3000-block=4/checkpoint.pth.tar' (epoch 60)
Test: [0/214] Time 11.201 (11.201) Loss 0.2607 (0.2607) Prec@1 87.500 (87.500)
Test: [1/214] Time 0.067 (5.634) Loss 0.1578 (0.2093) Prec@1 100.000 (93.750)
Test: [2/214] Time 0.046 (3.771) Loss 0.1060 (0.1748) Prec@1 100.000 (95.833)
Test: [3/214] Time 0.061 (2.844) Loss 0.3034 (0.2070) Prec@1 81.250 (92.188)
Test: [4/214] Time 0.087 (2.292) Loss 0.1685 (0.1993) Prec@1 87.500 (91.250)
Test: [5/214] Time 0.091 (1.925) Loss 0.1480 (0.1907) Prec@1 93.750 (91.667)
Test: [6/214] Time 0.044 (1.657) Loss 0.1116 (0.1794) Prec@1 93.750 (91.964)
Test: [7/214] Time 0.045 (1.455) Loss 0.2224 (0.1848) Prec@1 87.500 (91.406)
Test: [8/214] Time 0.132 (1.308) Loss 0.2058 (0.1871) Prec@1 93.750 (91.667)
Test: [9/214] Time 0.082 (1.186) Loss 0.4716 (0.2156) Prec@1 75.000 (90.000)
Test: [10/214] Time 0.131 (1.090) Loss 0.9066 (0.2784) Prec@1 50.000 (86.364)
Test: [11/214] Time 0.119 (1.009) Loss 0.3244 (0.2822) Prec@1 75.000 (85.417)
Test: [12/214] Time 0.097 (0.939) Loss 0.4440 (0.2947) Prec@1 81.250 (85.096)
Test: [13/214] Time 0.056 (0.876) Loss 0.2533 (0.2917) Prec@1 87.500 (85.268)
Test: [14/214] Time 0.050 (0.821) Loss 0.4654 (0.3033) Prec@1 68.750 (84.167)
Test: [15/214] Time 0.063 (0.773) Loss 0.1495 (0.2937) Prec@1 93.750 (84.766)
Test: [16/214] Time 0.096 (0.733) Loss 0.2305 (0.2900) Prec@1 93.750 (85.294)
Test: [17/214] Time 0.095 (0.698) Loss 0.2496 (0.2877) Prec@1 87.500 (85.417)
Test: [18/214] Time 0.135 (0.668) Loss 0.3048 (0.2886) Prec@1 75.000 (84.868)
Test: [19/214] Time 0.158 (0.643) Loss 0.3770 (0.2930) Prec@1 87.500 (85.000)
Test: [20/214] Time 0.081 (0.616) Loss 0.3288 (0.2947) Prec@1 87.500 (85.119)
Test: [21/214] Time 0.042 (0.590) Loss 0.1715 (0.2891) Prec@1 93.750 (85.511)
Test: [22/214] Time 0.044 (0.566) Loss 0.4496 (0.2961) Prec@1 81.250 (85.326)
Test: [23/214] Time 0.053 (0.545) Loss 0.1729 (0.2910) Prec@1 93.750 (85.677)
Test: [24/214] Time 0.078 (0.526) Loss 0.0676 (0.2820) Prec@1 100.000 (86.250)
Test: [25/214] Time 0.120 (0.511) Loss 0.4611 (0.2889) Prec@1 81.250 (86.058)
Test: [26/214] Time 0.095 (0.495) Loss 0.2551 (0.2877) Prec@1 87.500 (86.111)
Test: [27/214] Time 0.086 (0.481) Loss 0.3308 (0.2892) Prec@1 81.250 (85.938)
Test: [28/214] Time 0.073 (0.466) Loss 0.2250 (0.2870) Prec@1 87.500 (85.991)
Test: [29/214] Time 0.065 (0.453) Loss 0.5208 (0.2948) Prec@1 62.500 (85.208)
Test: [30/214] Time 0.053 (0.440) Loss 0.4636 (0.3002) Prec@1 75.000 (84.879)
Test: [31/214] Time 0.056 (0.428) Loss 0.3666 (0.3023) Prec@1 75.000 (84.570)
Test: [32/214] Time 0.050 (0.417) Loss 0.2060 (0.2994) Prec@1 87.500 (84.659)
Test: [33/214] Time 0.098 (0.407) Loss 0.1164 (0.2940) Prec@1 93.750 (84.926)
Test: [34/214] Time 0.143 (0.400) Loss 0.3333 (0.2951) Prec@1 87.500 (85.000)
Test: [35/214] Time 0.113 (0.392) Loss 0.1818 (0.2920) Prec@1 93.750 (85.243)
Test: [36/214] Time 0.106 (0.384) Loss 0.2087 (0.2897) Prec@1 87.500 (85.304)
Test: [37/214] Time 0.101 (0.377) Loss 0.2186 (0.2879) Prec@1 87.500 (85.362)
Test: [38/214] Time 0.096 (0.369) Loss 0.0996 (0.2830) Prec@1 93.750 (85.577)
Test: [39/214] Time 0.083 (0.362) Loss 0.3121 (0.2838) Prec@1 93.750 (85.781)
Test: [40/214] Time 0.062 (0.355) Loss 0.2222 (0.2823) Prec@1 87.500 (85.823)
Test: [41/214] Time 0.080 (0.348) Loss 0.4081 (0.2853) Prec@1 81.250 (85.714)
Test: [42/214] Time 0.064 (0.342) Loss 0.3267 (0.2862) Prec@1 81.250 (85.610)
Test: [43/214] Time 0.045 (0.335) Loss 0.0808 (0.2816) Prec@1 100.000 (85.938)
Test: [44/214] Time 0.066 (0.329) Loss 0.5449 (0.2874) Prec@1 62.500 (85.417)
Test: [45/214] Time 0.093 (0.324) Loss 0.3741 (0.2893) Prec@1 81.250 (85.326)
Test: [46/214] Time 0.059 (0.318) Loss 0.1078 (0.2854) Prec@1 93.750 (85.505)
Test: [47/214] Time 0.071 (0.313) Loss 0.2775 (0.2853) Prec@1 87.500 (85.547)
Test: [48/214] Time 0.105 (0.309) Loss 0.3871 (0.2873) Prec@1 68.750 (85.204)
Test: [49/214] Time 0.094 (0.305) Loss 0.5400 (0.2924) Prec@1 75.000 (85.000)
Test: [50/214] Time 0.074 (0.300) Loss 0.2073 (0.2907) Prec@1 93.750 (85.172)
Test: [51/214] Time 0.120 (0.297) Loss 0.3317 (0.2915) Prec@1 81.250 (85.096)
Test: [52/214] Time 0.284 (0.296) Loss 0.4489 (0.2945) Prec@1 68.750 (84.788)
Test: [53/214] Time 0.129 (0.293) Loss 0.3582 (0.2957) Prec@1 81.250 (84.722)
Test: [54/214] Time 0.097 (0.290) Loss 0.2803 (0.2954) Prec@1 93.750 (84.886)
Test: [55/214] Time 0.132 (0.287) Loss 0.2129 (0.2939) Prec@1 93.750 (85.045)
Test: [56/214] Time 0.123 (0.284) Loss 0.1677 (0.2917) Prec@1 87.500 (85.088)
Test: [57/214] Time 0.125 (0.281) Loss 0.3602 (0.2929) Prec@1 75.000 (84.914)
Test: [58/214] Time 0.135 (0.279) Loss 0.2394 (0.2920) Prec@1 87.500 (84.958)
Test: [59/214] Time 0.546 (0.283) Loss 0.3783 (0.2934) Prec@1 81.250 (84.896)
Test: [60/214] Time 0.082 (0.280) Loss 0.3108 (0.2937) Prec@1 81.250 (84.836)
Test: [61/214] Time 0.059 (0.276) Loss 0.1151 (0.2908) Prec@1 93.750 (84.980)
Test: [62/214] Time 0.093 (0.274) Loss 0.1965 (0.2893) Prec@1 87.500 (85.020)
Test: [63/214] Time 0.075 (0.270) Loss 0.3316 (0.2900) Prec@1 75.000 (84.863)
Test: [64/214] Time 0.105 (0.268) Loss 0.3229 (0.2905) Prec@1 81.250 (84.808)
Test: [65/214] Time 0.108 (0.265) Loss 0.1841 (0.2889) Prec@1 87.500 (84.848)
Test: [66/214] Time 0.096 (0.263) Loss 0.2653 (0.2885) Prec@1 87.500 (84.888)
Test: [67/214] Time 0.069 (0.260) Loss 0.2731 (0.2883) Prec@1 81.250 (84.835)
Test: [68/214] Time 0.057 (0.257) Loss 0.1263 (0.2859) Prec@1 93.750 (84.964)
Test: [69/214] Time 0.056 (0.254) Loss 0.2317 (0.2852) Prec@1 93.750 (85.089)
Test: [70/214] Time 0.050 (0.251) Loss 0.1820 (0.2837) Prec@1 87.500 (85.123)
Test: [71/214] Time 0.067 (0.249) Loss 0.4579 (0.2861) Prec@1 75.000 (84.983)
Test: [72/214] Time 0.110 (0.247) Loss 0.2124 (0.2851) Prec@1 87.500 (85.017)
Test: [73/214] Time 0.147 (0.246) Loss 0.4542 (0.2874) Prec@1 62.500 (84.713)
Test: [74/214] Time 0.176 (0.245) Loss 0.2970 (0.2875) Prec@1 75.000 (84.583)
Test: [75/214] Time 0.101 (0.243) Loss 0.0447 (0.2843) Prec@1 100.000 (84.786)
Test: [76/214] Time 0.077 (0.241) Loss 0.4577 (0.2866) Prec@1 81.250 (84.740)
Test: [77/214] Time 0.053 (0.238) Loss 0.0922 (0.2841) Prec@1 100.000 (84.936)
Test: [78/214] Time 0.053 (0.236) Loss 0.2799 (0.2841) Prec@1 87.500 (84.968)
Test: [79/214] Time 0.062 (0.234) Loss 0.2562 (0.2837) Prec@1 81.250 (84.922)
Test: [80/214] Time 0.062 (0.232) Loss 0.1314 (0.2818) Prec@1 93.750 (85.031)
Test: [81/214] Time 0.061 (0.230) Loss 0.1779 (0.2806) Prec@1 93.750 (85.137)
Test: [82/214] Time 0.193 (0.229) Loss 0.3503 (0.2814) Prec@1 93.750 (85.241)
Test: [83/214] Time 0.070 (0.227) Loss 0.2390 (0.2809) Prec@1 93.750 (85.342)
Test: [84/214] Time 0.067 (0.225) Loss 0.2989 (0.2811) Prec@1 87.500 (85.368)
Test: [85/214] Time 0.077 (0.224) Loss 0.3316 (0.2817) Prec@1 81.250 (85.320)
Test: [86/214] Time 0.047 (0.222) Loss 0.2570 (0.2814) Prec@1 87.500 (85.345)
Test: [87/214] Time 0.055 (0.220) Loss 0.3271 (0.2819) Prec@1 81.250 (85.298)
Test: [88/214] Time 0.047 (0.218) Loss 0.1733 (0.2807) Prec@1 93.750 (85.393)
Test: [89/214] Time 0.054 (0.216) Loss 0.6486 (0.2848) Prec@1 62.500 (85.139)
Test: [90/214] Time 0.042 (0.214) Loss 0.1517 (0.2833) Prec@1 93.750 (85.234)
Test: [91/214] Time 0.045 (0.212) Loss 0.3418 (0.2840) Prec@1 81.250 (85.190)
Test: [92/214] Time 0.051 (0.210) Loss 0.5220 (0.2865) Prec@1 68.750 (85.013)
Test: [93/214] Time 0.057 (0.209) Loss 0.4164 (0.2879) Prec@1 75.000 (84.907)
Test: [94/214] Time 0.068 (0.207) Loss 0.4121 (0.2892) Prec@1 81.250 (84.868)
Test: [95/214] Time 0.056 (0.206) Loss 0.1503 (0.2878) Prec@1 87.500 (84.896)
Test: [96/214] Time 0.087 (0.204) Loss 0.2766 (0.2877) Prec@1 81.250 (84.858)
Test: [97/214] Time 0.115 (0.204) Loss 0.2395 (0.2872) Prec@1 93.750 (84.949)
Test: [98/214] Time 0.138 (0.203) Loss 0.4426 (0.2887) Prec@1 75.000 (84.848)
Test: [99/214] Time 0.144 (0.202) Loss 0.4849 (0.2907) Prec@1 75.000 (84.750)
Test: [100/214] Time 0.131 (0.202) Loss 0.1662 (0.2895) Prec@1 87.500 (84.777)
Test: [101/214] Time 0.092 (0.201) Loss 0.1042 (0.2876) Prec@1 93.750 (84.865)
Test: [102/214] Time 0.088 (0.199) Loss 0.4668 (0.2894) Prec@1 81.250 (84.830)
Test: [103/214] Time 0.082 (0.198) Loss 0.3621 (0.2901) Prec@1 81.250 (84.796)
Test: [104/214] Time 0.077 (0.197) Loss 0.4676 (0.2918) Prec@1 81.250 (84.762)
Test: [105/214] Time 0.101 (0.196) Loss 0.1992 (0.2909) Prec@1 87.500 (84.788)
Test: [106/214] Time 0.150 (0.196) Loss 0.2772 (0.2908) Prec@1 93.750 (84.871)
Test: [107/214] Time 0.070 (0.195) Loss 0.3883 (0.2917) Prec@1 75.000 (84.780)
Test: [108/214] Time 0.078 (0.194) Loss 0.2923 (0.2917) Prec@1 81.250 (84.748)
Test: [109/214] Time 0.094 (0.193) Loss 0.3286 (0.2920) Prec@1 87.500 (84.773)
Test: [110/214] Time 0.046 (0.191) Loss 0.1382 (0.2906) Prec@1 93.750 (84.854)
Test: [111/214] Time 0.050 (0.190) Loss 0.2462 (0.2902) Prec@1 87.500 (84.877)
Test: [112/214] Time 0.085 (0.189) Loss 0.1496 (0.2890) Prec@1 100.000 (85.011)
Test: [113/214] Time 0.091 (0.188) Loss 0.2322 (0.2885) Prec@1 93.750 (85.088)
Test: [114/214] Time 0.144 (0.188) Loss 0.3522 (0.2890) Prec@1 81.250 (85.054)
Test: [115/214] Time 0.106 (0.187) Loss 0.4547 (0.2905) Prec@1 81.250 (85.022)
Test: [116/214] Time 0.050 (0.186) Loss 0.3842 (0.2913) Prec@1 87.500 (85.043)
Test: [117/214] Time 0.043 (0.185) Loss 0.2003 (0.2905) Prec@1 93.750 (85.117)
Test: [118/214] Time 0.049 (0.184) Loss 0.2329 (0.2900) Prec@1 87.500 (85.137)
Test: [119/214] Time 0.053 (0.183) Loss 0.1716 (0.2890) Prec@1 93.750 (85.208)
Test: [120/214] Time 0.043 (0.181) Loss 0.3089 (0.2892) Prec@1 87.500 (85.227)
Test: [121/214] Time 0.048 (0.180) Loss 0.3135 (0.2894) Prec@1 87.500 (85.246)
Test: [122/214] Time 0.072 (0.179) Loss 0.4701 (0.2909) Prec@1 68.750 (85.112)
Test: [123/214] Time 0.056 (0.178) Loss 0.4308 (0.2920) Prec@1 75.000 (85.030)
Test: [124/214] Time 0.086 (0.178) Loss 0.2072 (0.2913) Prec@1 87.500 (85.050)
Test: [125/214] Time 0.108 (0.177) Loss 0.4704 (0.2927) Prec@1 68.750 (84.921)
Test: [126/214] Time 0.082 (0.176) Loss 0.0354 (0.2907) Prec@1 100.000 (85.039)
Test: [127/214] Time 0.080 (0.176) Loss 0.1397 (0.2895) Prec@1 93.750 (85.107)
Test: [128/214] Time 0.078 (0.175) Loss 0.2489 (0.2892) Prec@1 93.750 (85.174)
Test: [129/214] Time 0.085 (0.174) Loss 0.2714 (0.2891) Prec@1 81.250 (85.144)
Test: [130/214] Time 0.189 (0.174) Loss 0.4883 (0.2906) Prec@1 81.250 (85.115)
Test: [131/214] Time 0.065 (0.174) Loss 0.2942 (0.2906) Prec@1 87.500 (85.133)
Test: [132/214] Time 0.051 (0.173) Loss 0.1635 (0.2897) Prec@1 100.000 (85.244)
Test: [133/214] Time 0.058 (0.172) Loss 0.7558 (0.2932) Prec@1 62.500 (85.075)
Test: [134/214] Time 0.057 (0.171) Loss 0.1536 (0.2921) Prec@1 93.750 (85.139)
Test: [135/214] Time 0.076 (0.170) Loss 0.4966 (0.2936) Prec@1 75.000 (85.064)
Test: [136/214] Time 0.036 (0.169) Loss 0.2342 (0.2932) Prec@1 87.500 (85.082)
Test: [137/214] Time 0.095 (0.169) Loss 0.2743 (0.2930) Prec@1 87.500 (85.100)
Test: [138/214] Time 0.106 (0.168) Loss 0.3421 (0.2934) Prec@1 87.500 (85.117)
Test: [139/214] Time 0.175 (0.168) Loss 0.3205 (0.2936) Prec@1 81.250 (85.089)
Test: [140/214] Time 0.123 (0.168) Loss 0.4365 (0.2946) Prec@1 81.250 (85.062)
Test: [141/214] Time 0.130 (0.168) Loss 0.4789 (0.2959) Prec@1 75.000 (84.991)
Test: [142/214] Time 0.156 (0.168) Loss 0.3277 (0.2961) Prec@1 81.250 (84.965)
Test: [143/214] Time 0.147 (0.167) Loss 0.5026 (0.2976) Prec@1 75.000 (84.896)
Test: [144/214] Time 0.109 (0.167) Loss 0.2051 (0.2969) Prec@1 81.250 (84.871)
Test: [145/214] Time 0.137 (0.167) Loss 0.7212 (0.2998) Prec@1 62.500 (84.717)
Test: [146/214] Time 0.115 (0.166) Loss 0.3626 (0.3003) Prec@1 81.250 (84.694)
Test: [147/214] Time 0.132 (0.166) Loss 0.4104 (0.3010) Prec@1 81.250 (84.671)
Test: [148/214] Time 0.111 (0.166) Loss 0.6627 (0.3034) Prec@1 68.750 (84.564)
Test: [149/214] Time 0.101 (0.165) Loss 0.0992 (0.3021) Prec@1 93.750 (84.625)
Test: [150/214] Time 0.112 (0.165) Loss 0.3192 (0.3022) Prec@1 81.250 (84.603)
Test: [151/214] Time 0.083 (0.165) Loss 0.3352 (0.3024) Prec@1 87.500 (84.622)
Test: [152/214] Time 0.159 (0.165) Loss 0.5007 (0.3037) Prec@1 68.750 (84.518)
Test: [153/214] Time 0.053 (0.164) Loss 0.2930 (0.3036) Prec@1 87.500 (84.537)
Test: [154/214] Time 0.054 (0.163) Loss 0.3224 (0.3037) Prec@1 93.750 (84.597)
Test: [155/214] Time 0.053 (0.162) Loss 0.0933 (0.3024) Prec@1 100.000 (84.696)
Test: [156/214] Time 0.057 (0.162) Loss 0.1487 (0.3014) Prec@1 93.750 (84.753)
Test: [157/214] Time 0.054 (0.161) Loss 0.3715 (0.3019) Prec@1 87.500 (84.771)
Test: [158/214] Time 0.120 (0.161) Loss 0.1293 (0.3008) Prec@1 100.000 (84.866)
Test: [159/214] Time 0.105 (0.160) Loss 0.3710 (0.3012) Prec@1 87.500 (84.883)
Test: [160/214] Time 0.088 (0.160) Loss 0.1519 (0.3003) Prec@1 93.750 (84.938)
Test: [161/214] Time 0.049 (0.159) Loss 0.3831 (0.3008) Prec@1 81.250 (84.915)
Test: [162/214] Time 0.053 (0.159) Loss 0.3570 (0.3011) Prec@1 81.250 (84.893)
Test: [163/214] Time 0.050 (0.158) Loss 0.3296 (0.3013) Prec@1 75.000 (84.832)
Test: [164/214] Time 0.051 (0.157) Loss 0.1989 (0.3007) Prec@1 93.750 (84.886)
Test: [165/214] Time 0.053 (0.157) Loss 0.0796 (0.2994) Prec@1 100.000 (84.977)
Test: [166/214] Time 0.058 (0.156) Loss 0.2722 (0.2992) Prec@1 81.250 (84.955)
Test: [167/214] Time 0.109 (0.156) Loss 0.4920 (0.3004) Prec@1 68.750 (84.859)
Test: [168/214] Time 0.084 (0.155) Loss 0.0693 (0.2990) Prec@1 100.000 (84.948)
Test: [169/214] Time 0.108 (0.155) Loss 0.4123 (0.2997) Prec@1 81.250 (84.926)
Test: [170/214] Time 0.058 (0.155) Loss 0.2509 (0.2994) Prec@1 81.250 (84.905)
Test: [171/214] Time 0.046 (0.154) Loss 0.2394 (0.2990) Prec@1 87.500 (84.920)
Test: [172/214] Time 0.062 (0.153) Loss 0.2245 (0.2986) Prec@1 87.500 (84.935)
Test: [173/214] Time 0.072 (0.153) Loss 0.2048 (0.2980) Prec@1 87.500 (84.950)
Test: [174/214] Time 0.104 (0.153) Loss 0.3298 (0.2982) Prec@1 81.250 (84.929)
Test: [175/214] Time 0.066 (0.152) Loss 0.4080 (0.2989) Prec@1 68.750 (84.837)
Test: [176/214] Time 0.048 (0.152) Loss 0.1013 (0.2977) Prec@1 100.000 (84.922)
Test: [177/214] Time 0.062 (0.151) Loss 0.2795 (0.2976) Prec@1 93.750 (84.972)
Test: [178/214] Time 0.086 (0.151) Loss 0.2261 (0.2972) Prec@1 87.500 (84.986)
Test: [179/214] Time 0.117 (0.151) Loss 0.3375 (0.2975) Prec@1 81.250 (84.965)
Test: [180/214] Time 0.122 (0.150) Loss 0.6710 (0.2995) Prec@1 68.750 (84.876)
Test: [181/214] Time 0.057 (0.150) Loss 0.1985 (0.2990) Prec@1 87.500 (84.890)
Test: [182/214] Time 0.106 (0.150) Loss 0.1054 (0.2979) Prec@1 93.750 (84.939)
Test: [183/214] Time 0.129 (0.150) Loss 0.2059 (0.2974) Prec@1 87.500 (84.952)
Test: [184/214] Time 0.118 (0.149) Loss 0.2403 (0.2971) Prec@1 81.250 (84.932)
Test: [185/214] Time 0.110 (0.149) Loss 0.2152 (0.2967) Prec@1 87.500 (84.946)
Test: [186/214] Time 0.143 (0.149) Loss 0.1807 (0.2960) Prec@1 87.500 (84.960)
Test: [187/214] Time 0.111 (0.149) Loss 0.3571 (0.2964) Prec@1 75.000 (84.907)
Test: [188/214] Time 0.120 (0.149) Loss 0.2785 (0.2963) Prec@1 81.250 (84.888)
Test: [189/214] Time 0.093 (0.148) Loss 0.4799 (0.2972) Prec@1 75.000 (84.836)
Test: [190/214] Time 0.134 (0.148) Loss 0.2059 (0.2968) Prec@1 93.750 (84.882)
Test: [191/214] Time 0.060 (0.148) Loss 0.1261 (0.2959) Prec@1 93.750 (84.928)
Test: [192/214] Time 0.121 (0.148) Loss 0.1231 (0.2950) Prec@1 93.750 (84.974)
Test: [193/214] Time 0.053 (0.147) Loss 0.2488 (0.2947) Prec@1 87.500 (84.987)
Test: [194/214] Time 0.052 (0.147) Loss 0.3176 (0.2949) Prec@1 87.500 (85.000)
Test: [195/214] Time 0.054 (0.146) Loss 0.2220 (0.2945) Prec@1 87.500 (85.013)
Test: [196/214] Time 0.105 (0.146) Loss 0.0658 (0.2933) Prec@1 100.000 (85.089)
Test: [197/214] Time 0.124 (0.146) Loss 0.3964 (0.2938) Prec@1 81.250 (85.069)
Test: [198/214] Time 0.103 (0.146) Loss 0.3075 (0.2939) Prec@1 81.250 (85.050)
Test: [199/214] Time 0.068 (0.145) Loss 0.4991 (0.2949) Prec@1 81.250 (85.031)
Test: [200/214] Time 0.056 (0.145) Loss 0.2989 (0.2950) Prec@1 75.000 (84.981)
Test: [201/214] Time 0.047 (0.144) Loss 0.1454 (0.2942) Prec@1 87.500 (84.994)
Test: [202/214] Time 0.069 (0.144) Loss 0.1171 (0.2933) Prec@1 93.750 (85.037)
Test: [203/214] Time 0.057 (0.144) Loss 0.1601 (0.2927) Prec@1 93.750 (85.080)
Test: [204/214] Time 0.047 (0.143) Loss 0.2712 (0.2926) Prec@1 81.250 (85.061)
Test: [205/214] Time 0.052 (0.143) Loss 0.3653 (0.2929) Prec@1 81.250 (85.042)
Test: [206/214] Time 0.074 (0.142) Loss 0.3932 (0.2934) Prec@1 75.000 (84.994)
Test: [207/214] Time 0.071 (0.142) Loss 0.3791 (0.2938) Prec@1 81.250 (84.976)
Test: [208/214] Time 0.094 (0.142) Loss 0.2602 (0.2937) Prec@1 81.250 (84.958)
Test: [209/214] Time 0.095 (0.142) Loss 0.6295 (0.2953) Prec@1 62.500 (84.851)
Test: [210/214] Time 0.068 (0.141) Loss 0.2665 (0.2951) Prec@1 87.500 (84.864)
Test: [211/214] Time 0.058 (0.141) Loss 0.3689 (0.2955) Prec@1 81.250 (84.847)
Test: [212/214] Time 0.041 (0.140) Loss 0.2952 (0.2955) Prec@1 87.500 (84.859)
Test: [213/214] Time 0.558 (0.142) Loss 0.1382 (0.2949) Prec@1 92.308 (84.887)
* Prec@1 84.887
Creating CAM
Fatal error in main loop
Traceback (most recent call last):
File "test.py", line 173, in main
run_model(args, q)
File "test.py", line 255, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "test.py", line 313, in validate
f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'])
File "/home/yh9468/detection/trainer/visualize/grad_cam.py", line 183, in make_grad_cam
probs, ids = bp.forward(images) # sorted
File "/home/yh9468/detection/trainer/visualize/grad_cam_utils.py", line 57, in forward
return super(BackPropagation, self).forward(self.image)
File "/home/yh9468/detection/trainer/visualize/grad_cam_utils.py", line 31, in forward
self.logits = self.model(image)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/parallel/data_parallel.py", line 152, in forward
outputs = self.parallel_apply(replicas, inputs, kwargs)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/parallel/data_parallel.py", line 162, in parallel_apply
return parallel_apply(replicas, inputs, kwargs, self.device_ids[:len(replicas)])
File "/usr/local/lib/python3.6/dist-packages/torch/nn/parallel/parallel_apply.py", line 83, in parallel_apply
raise output
File "/usr/local/lib/python3.6/dist-packages/torch/nn/parallel/parallel_apply.py", line 59, in _worker
output = module(*input, **kwargs)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "/home/yh9468/detection/trainer/model.py", line 385, in forward
x = self.extract_features(inputs)
File "/home/yh9468/detection/trainer/model.py", line 374, in extract_features
x = block(x, drop_connect_rate=drop_connect_rate)
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "/home/yh9468/detection/trainer/model.py", line 272, in forward
x = self._swish(self._bn0(self._expand_conv(inputs)))
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 493, in __call__
result = self.forward(*input, **kwargs)
File "/home/yh9468/detection/trainer/utils.py", line 139, in forward
return SwishImplementation.apply(x)
File "/home/yh9468/detection/trainer/utils.py", line 126, in forward
result = i * torch.sigmoid(i)
RuntimeError: CUDA out of memory. Tried to allocate 262.00 MiB (GPU 0; 10.92 GiB total capacity; 7.22 GiB already allocated; 51.56 MiB free; 16.05 MiB cached)
[eval] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2348)
Test: [0/14] Time 1.123 (1.123) Loss 0.1061 (0.1061) Prec@1 94.922 (94.922)
Test: [1/14] Time 0.054 (0.588) Loss 0.1432 (0.1246) Prec@1 94.141 (94.531)
Test: [2/14] Time 0.033 (0.403) Loss 0.0830 (0.1107) Prec@1 97.656 (95.573)
Test: [3/14] Time 0.017 (0.307) Loss 0.0947 (0.1067) Prec@1 96.094 (95.703)
Test: [4/14] Time 0.027 (0.251) Loss 0.1500 (0.1154) Prec@1 92.969 (95.156)
Test: [5/14] Time 0.018 (0.212) Loss 0.1165 (0.1156) Prec@1 95.703 (95.247)
Test: [6/14] Time 0.029 (0.186) Loss 0.1491 (0.1204) Prec@1 94.531 (95.145)
Test: [7/14] Time 0.056 (0.170) Loss 0.1830 (0.1282) Prec@1 91.797 (94.727)
Test: [8/14] Time 0.041 (0.155) Loss 0.1621 (0.1320) Prec@1 94.531 (94.705)
Test: [9/14] Time 0.025 (0.142) Loss 0.0904 (0.1278) Prec@1 96.484 (94.883)
Test: [10/14] Time 0.030 (0.132) Loss 0.1743 (0.1320) Prec@1 91.016 (94.531)
Test: [11/14] Time 0.016 (0.122) Loss 0.1148 (0.1306) Prec@1 95.312 (94.596)
Test: [12/14] Time 0.017 (0.114) Loss 0.1283 (0.1304) Prec@1 94.922 (94.621)
Test: [13/14] Time 0.096 (0.113) Loss 0.1226 (0.1302) Prec@1 95.699 (94.651)
* Prec@1 94.651
Creating CAM
Best accuracy: 95.3216364899574
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/4374_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2348)
Test: [0/14] Time 0.926 (0.926) Loss 0.1238 (0.1238) Prec@1 94.922 (94.922)
Test: [1/14] Time 0.039 (0.483) Loss 0.1387 (0.1312) Prec@1 94.141 (94.531)
Test: [2/14] Time 0.020 (0.329) Loss 0.1748 (0.1458) Prec@1 92.578 (93.880)
Test: [3/14] Time 0.012 (0.249) Loss 0.0883 (0.1314) Prec@1 97.266 (94.727)
Test: [4/14] Time 0.015 (0.203) Loss 0.1191 (0.1289) Prec@1 95.703 (94.922)
Test: [5/14] Time 0.014 (0.171) Loss 0.1319 (0.1294) Prec@1 94.922 (94.922)
Test: [6/14] Time 0.036 (0.152) Loss 0.1553 (0.1331) Prec@1 92.578 (94.587)
Test: [7/14] Time 0.013 (0.135) Loss 0.1337 (0.1332) Prec@1 95.312 (94.678)
Test: [8/14] Time 0.026 (0.123) Loss 0.1282 (0.1327) Prec@1 94.922 (94.705)
Test: [9/14] Time 0.031 (0.113) Loss 0.1576 (0.1352) Prec@1 93.750 (94.609)
Test: [10/14] Time 0.029 (0.106) Loss 0.1193 (0.1337) Prec@1 95.312 (94.673)
Test: [11/14] Time 0.025 (0.099) Loss 0.1008 (0.1310) Prec@1 95.703 (94.759)
Test: [12/14] Time 0.023 (0.093) Loss 0.1282 (0.1307) Prec@1 93.750 (94.681)
Test: [13/14] Time 0.084 (0.093) Loss 0.1103 (0.1302) Prec@1 93.548 (94.651)
* Prec@1 94.651
Creating CAM
Best accuracy: 95.3216364899574
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1389)
Test: [0/2] Time 1.166 (1.166) Loss 0.1193 (0.1193) Prec@1 96.094 (96.094)
Test: [1/2] Time 0.040 (0.603) Loss 0.1689 (0.1317) Prec@1 95.349 (95.906)
* Prec@1 95.906
Creating CAM
Best accuracy: 95.90643257007264
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1389)
Test: [0/2] Time 1.166 (1.166) Loss 0.1175 (0.1175) Prec@1 96.094 (96.094)
Test: [1/2] Time 0.049 (0.607) Loss 0.1742 (0.1317) Prec@1 95.349 (95.906)
* Prec@1 95.906
Creating CAM
Best accuracy: 95.90643257007264
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1389)
Test: [0/14] Time 1.217 (1.217) Loss 0.2104 (0.2104) Prec@1 89.844 (89.844)
Test: [1/14] Time 0.019 (0.618) Loss 0.1402 (0.1753) Prec@1 94.531 (92.188)
Test: [2/14] Time 0.012 (0.416) Loss 0.1672 (0.1726) Prec@1 93.359 (92.578)
Test: [3/14] Time 0.011 (0.315) Loss 0.1261 (0.1610) Prec@1 94.531 (93.066)
Test: [4/14] Time 0.011 (0.254) Loss 0.1214 (0.1531) Prec@1 95.312 (93.516)
Test: [5/14] Time 0.012 (0.214) Loss 0.1885 (0.1590) Prec@1 92.578 (93.359)
Test: [6/14] Time 0.012 (0.185) Loss 0.1832 (0.1624) Prec@1 92.188 (93.192)
Test: [7/14] Time 0.011 (0.163) Loss 0.1629 (0.1625) Prec@1 93.359 (93.213)
Test: [8/14] Time 0.010 (0.146) Loss 0.1113 (0.1568) Prec@1 95.703 (93.490)
Test: [9/14] Time 0.012 (0.133) Loss 0.1671 (0.1578) Prec@1 92.188 (93.359)
Test: [10/14] Time 0.011 (0.122) Loss 0.1282 (0.1551) Prec@1 94.922 (93.501)
Test: [11/14] Time 0.011 (0.112) Loss 0.1508 (0.1548) Prec@1 93.750 (93.522)
Test: [12/14] Time 0.013 (0.105) Loss 0.2146 (0.1594) Prec@1 91.406 (93.359)
Test: [13/14] Time 0.037 (0.100) Loss 0.1688 (0.1596) Prec@1 93.548 (93.365)
* Prec@1 93.365
Creating CAM
Best accuracy: 95.90643257007264
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/14] Time 1.085 (1.085) Loss 0.2096 (0.2096) Prec@1 95.312 (95.312)
Test: [1/14] Time 0.039 (0.562) Loss 0.2121 (0.2109) Prec@1 94.531 (94.922)
Test: [2/14] Time 0.016 (0.380) Loss 0.2051 (0.2090) Prec@1 96.094 (95.312)
Test: [3/14] Time 0.014 (0.289) Loss 0.2188 (0.2114) Prec@1 92.188 (94.531)
Test: [4/14] Time 0.012 (0.233) Loss 0.2191 (0.2130) Prec@1 93.750 (94.375)
Test: [5/14] Time 0.011 (0.196) Loss 0.2372 (0.2170) Prec@1 93.750 (94.271)
Test: [6/14] Time 0.012 (0.170) Loss 0.2187 (0.2172) Prec@1 92.578 (94.029)
Test: [7/14] Time 0.010 (0.150) Loss 0.1857 (0.2133) Prec@1 95.312 (94.189)
Test: [8/14] Time 0.010 (0.134) Loss 0.2194 (0.2140) Prec@1 95.312 (94.314)
Test: [9/14] Time 0.011 (0.122) Loss 0.2345 (0.2160) Prec@1 92.969 (94.180)
Test: [10/14] Time 0.012 (0.112) Loss 0.2408 (0.2183) Prec@1 92.188 (93.999)
Test: [11/14] Time 0.011 (0.104) Loss 0.2273 (0.2190) Prec@1 93.750 (93.978)
Test: [12/14] Time 0.011 (0.097) Loss 0.2092 (0.2183) Prec@1 94.141 (93.990)
Test: [13/14] Time 0.044 (0.093) Loss 0.2458 (0.2190) Prec@1 90.323 (93.891)
* Prec@1 93.891
Creating CAM
Best accuracy: 93.89067538834844
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/2] Time 1.041 (1.041) Loss 0.2489 (0.2489) Prec@1 91.406 (91.406)
Test: [1/2] Time 0.052 (0.546) Loss 0.2858 (0.2582) Prec@1 86.047 (90.058)
* Prec@1 90.058
Creating CAM
Best accuracy: 93.27485362270423
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2117)
Test: [0/2] Time 1.039 (1.039) Loss 0.2551 (0.2551) Prec@1 93.359 (93.359)
Test: [1/2] Time 0.061 (0.550) Loss 0.2621 (0.2569) Prec@1 93.023 (93.275)
* Prec@1 93.275
Creating CAM
Best accuracy: 93.27485362270423
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2117)
Test: [0/14] Time 1.156 (1.156) Loss 0.2277 (0.2277) Prec@1 94.531 (94.531)
Test: [1/14] Time 0.020 (0.588) Loss 0.2137 (0.2207) Prec@1 94.531 (94.531)
Test: [2/14] Time 0.014 (0.397) Loss 0.2680 (0.2365) Prec@1 91.016 (93.359)
Test: [3/14] Time 0.013 (0.301) Loss 0.2404 (0.2375) Prec@1 92.188 (93.066)
Test: [4/14] Time 0.013 (0.243) Loss 0.2448 (0.2389) Prec@1 91.406 (92.734)
Test: [5/14] Time 0.014 (0.205) Loss 0.2563 (0.2418) Prec@1 89.844 (92.253)
Test: [6/14] Time 0.014 (0.178) Loss 0.2798 (0.2473) Prec@1 89.453 (91.853)
Test: [7/14] Time 0.012 (0.157) Loss 0.2298 (0.2451) Prec@1 92.969 (91.992)
Test: [8/14] Time 0.011 (0.141) Loss 0.2395 (0.2445) Prec@1 94.531 (92.274)
Test: [9/14] Time 0.013 (0.128) Loss 0.2545 (0.2455) Prec@1 90.625 (92.109)
Test: [10/14] Time 0.021 (0.118) Loss 0.2275 (0.2438) Prec@1 93.359 (92.223)
Test: [11/14] Time 0.015 (0.110) Loss 0.2461 (0.2440) Prec@1 91.406 (92.155)
Test: [12/14] Time 0.010 (0.102) Loss 0.2224 (0.2424) Prec@1 94.531 (92.338)
Test: [13/14] Time 0.036 (0.097) Loss 0.2299 (0.2420) Prec@1 92.473 (92.341)
* Prec@1 92.341
Creating CAM
Best accuracy: 93.27485362270423
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/Error/49766_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/14] Time 1.152 (1.152) Loss 0.2329 (0.2329) Prec@1 93.359 (93.359)
Test: [1/14] Time 0.018 (0.585) Loss 0.2266 (0.2298) Prec@1 94.531 (93.945)
Test: [2/14] Time 0.013 (0.394) Loss 0.2336 (0.2310) Prec@1 91.797 (93.229)
Test: [3/14] Time 0.012 (0.299) Loss 0.2259 (0.2298) Prec@1 92.969 (93.164)
Test: [4/14] Time 0.012 (0.241) Loss 0.2114 (0.2261) Prec@1 93.750 (93.281)
Test: [5/14] Time 0.012 (0.203) Loss 0.2309 (0.2269) Prec@1 92.188 (93.099)
Test: [6/14] Time 0.010 (0.175) Loss 0.1863 (0.2211) Prec@1 96.484 (93.583)
Test: [7/14] Time 0.012 (0.155) Loss 0.2356 (0.2229) Prec@1 92.969 (93.506)
Test: [8/14] Time 0.012 (0.139) Loss 0.2073 (0.2212) Prec@1 92.969 (93.446)
Test: [9/14] Time 0.010 (0.126) Loss 0.2034 (0.2194) Prec@1 96.484 (93.750)
Test: [10/14] Time 0.011 (0.116) Loss 0.2154 (0.2190) Prec@1 94.922 (93.857)
Test: [11/14] Time 0.012 (0.107) Loss 0.2193 (0.2191) Prec@1 93.359 (93.815)
Test: [12/14] Time 0.011 (0.100) Loss 0.2111 (0.2184) Prec@1 94.141 (93.840)
Test: [13/14] Time 0.052 (0.096) Loss 0.2402 (0.2190) Prec@1 95.699 (93.891)
* Prec@1 93.891
Best accuracy: 93.89067535266581
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1389)
Test: [0/2] Time 1.056 (1.056) Loss 0.1371 (0.1371) Prec@1 95.703 (95.703)
Test: [1/2] Time 0.057 (0.556) Loss 0.1158 (0.1317) Prec@1 96.512 (95.906)
* Prec@1 95.906
Best accuracy: 95.90643257007264
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/47098_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1389)
Test: [0/2] Time 1.038 (1.038) Loss 0.1294 (0.1294) Prec@1 96.094 (96.094)
Test: [1/2] Time 0.039 (0.538) Loss 0.1388 (0.1317) Prec@1 95.349 (95.906)
* Prec@1 95.906
Best accuracy: 95.90643257007264
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
Test: [0/2] Time 0.814 (0.814) Loss 0.0781 (0.0781) Prec@1 98.438 (98.438)
Test: [1/2] Time 0.047 (0.430) Loss 0.1064 (0.0846) Prec@1 97.368 (98.193)
* Prec@1 98.193
Best accuracy: 98.19277163585984
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
Test: [0/2] Time 0.796 (0.796) Loss 0.0715 (0.0715) Prec@1 98.047 (98.047)
Test: [1/2] Time 0.046 (0.421) Loss 0.1287 (0.0846) Prec@1 98.684 (98.193)
* Prec@1 98.193
Best accuracy: 98.1927713600986
[eval] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
Fatal error in main loop
Traceback (most recent call last):
File "/home/yh9468/detection/trainer/test.py", line 181, in main
run_model(args, q)
File "/home/yh9468/detection/trainer/test.py", line 263, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "/home/yh9468/detection/trainer/test.py", line 296, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=True)
File "/home/yh9468/detection/trainer/test.py", line 373, in save_error_case
cv2.imwrite(f"eval_results/{args['task']}/correct_case/idx_{correct_case_idx}_label_{class_arr[target[idx]]}_real.bmp" ,img)
NameError: name 'correct_case_idx' is not defined
[validate_2020-03-26-16-50-29] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
Fatal error in main loop
Traceback (most recent call last):
File "/home/yh9468/detection/trainer/test.py", line 184, in main
run_model(args, q)
File "/home/yh9468/detection/trainer/test.py", line 266, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "/home/yh9468/detection/trainer/test.py", line 299, in validate
save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=True)
File "/home/yh9468/detection/trainer/test.py", line 376, in save_error_case
cv2.imwrite(f"eval_results/{args['task']}/correct_case/idx_{correct_case_idx}_label_{class_arr[target[idx]]}_real.bmp" ,img)
NameError: name 'correct_case_idx' is not defined
[validate_2020-03-26-16-55-37] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
* Prec@1 98.193
* Prec@1 98.193
Best accuracy: 98.19277163585984
[validate_2020-03-26-16-57-52] done
[validate_2020-03-26-16-57-52] done
start test using path : ../data/Fourth_data/demo
Test start
loading checkpoint...
checkpoint already loaded!
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
* Prec@1 98.193
* Prec@1 98.193
Best accuracy: 98.1927713600986
[validate_2020-03-26-17-02-22] done
[validate_2020-03-26-17-02-22] done
start test using path : ../data/Fourth_data/demo
Test start
loading checkpoint...
checkpoint already loaded!
start test
data path directory is ../data/Fourth_data/demo
finish test
start test using path : ../data/Fourth_data/demo
Test start
loading checkpoint...
checkpoint already loaded!
start test
data path directory is ../data/Fourth_data/demo
finish test
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
* Prec@1 98.193
* Prec@1 98.193
Best accuracy: 98.1927713600986
[validate_2020-03-26-17-09-14] done
[validate_2020-03-26-17-09-14] done
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
* Prec@1 98.193
* Prec@1 98.193
Best accuracy: 98.19277163585984
[validate_2020-03-26-17-09-34] done
[validate_2020-03-26-17-09-34] done
start test using path : ../data/Fourth_data/demo
Test start
loading checkpoint...
checkpoint already loaded!
start test
data path directory is ../data/Fourth_data/demo
finish test
set Type
start test using path : ../data/Fourth_data/demo
Test start
loading checkpoint...
checkpoint already loaded!
start test
data path directory is ../data/Fourth_data/demo
finish test
train start
load yml file
2020-03-26-17-10-28
use seed 825
use dataset : ../data/Fourth_data/All
{'task': 'All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': '../data/Fourth_data/All', 'val': '../data/Fourth_data/All', 'test': '../data/Fourth_data/All'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'weight': [2.0, 4.0, 1.0, 1.0, 3.0, 1.0, 1.0], 'resume': '', 'augment': True, 'size': 224, 'confidence': False}, 'predict': {'batch-size': 256, 'worker': 64, 'cam-class': 'Crack', 'cam': False, 'normalize': True}, 'optimizer': {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-03-26-17-10-28'}
using normalize
using no dropout
using SGD
Number of model parameters: 461559
Epoch: [0][0/12] Time 3.022 (3.022) Loss 1.9330 (1.9330) Prec@1 16.016 (16.016)
Epoch: [0][10/12] Time 0.159 (0.411) Loss 1.6084 (1.7482) Prec@1 23.828 (17.898)
Test: [0/2] Time 1.651 (1.651) Loss 1.8596 (1.8596) Prec@1 10.938 (10.938)
* epoch: 0 Prec@1 12.012
* epoch: 0 Prec@1 12.012
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [1][0/12] Time 1.962 (1.962) Loss 1.5773 (1.5773) Prec@1 28.516 (28.516)
Epoch: [1][10/12] Time 0.158 (0.323) Loss 1.6759 (1.5514) Prec@1 20.312 (26.136)
Test: [0/2] Time 1.653 (1.653) Loss 1.8462 (1.8462) Prec@1 31.250 (31.250)
* epoch: 1 Prec@1 30.030
* epoch: 1 Prec@1 30.030
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [2][0/12] Time 1.952 (1.952) Loss 1.5458 (1.5458) Prec@1 26.562 (26.562)
Epoch: [2][10/12] Time 0.157 (0.331) Loss 1.2252 (1.3803) Prec@1 42.969 (31.889)
Test: [0/2] Time 1.674 (1.674) Loss 1.7407 (1.7407) Prec@1 30.859 (30.859)
* epoch: 2 Prec@1 32.733
* epoch: 2 Prec@1 32.733
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [3][0/12] Time 1.960 (1.960) Loss 1.2237 (1.2237) Prec@1 39.062 (39.062)
Epoch: [3][10/12] Time 0.155 (0.323) Loss 1.1566 (1.2237) Prec@1 41.797 (41.193)
Test: [0/2] Time 1.687 (1.687) Loss 1.7368 (1.7368) Prec@1 22.266 (22.266)
* epoch: 3 Prec@1 22.823
* epoch: 3 Prec@1 22.823
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [4][0/12] Time 1.967 (1.967) Loss 1.3065 (1.3065) Prec@1 48.828 (48.828)
Epoch: [4][10/12] Time 0.162 (0.335) Loss 1.0501 (1.1183) Prec@1 58.594 (53.906)
Test: [0/2] Time 1.665 (1.665) Loss 1.1960 (1.1960) Prec@1 53.516 (53.516)
* epoch: 4 Prec@1 52.553
* epoch: 4 Prec@1 52.553
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [5][0/12] Time 1.930 (1.930) Loss 1.0283 (1.0283) Prec@1 61.719 (61.719)
Epoch: [5][10/12] Time 0.159 (0.320) Loss 1.0155 (1.0390) Prec@1 58.594 (60.227)
Test: [0/2] Time 1.692 (1.692) Loss 1.2309 (1.2309) Prec@1 60.938 (60.938)
* epoch: 5 Prec@1 60.360
* epoch: 5 Prec@1 60.360
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [6][0/12] Time 2.214 (2.214) Loss 0.9515 (0.9515) Prec@1 64.844 (64.844)
Epoch: [6][10/12] Time 0.158 (0.346) Loss 0.7833 (0.9050) Prec@1 75.781 (68.999)
Test: [0/2] Time 1.663 (1.663) Loss 0.9625 (0.9625) Prec@1 49.609 (49.609)
* epoch: 6 Prec@1 49.850
* epoch: 6 Prec@1 49.850
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [7][0/12] Time 1.919 (1.919) Loss 0.7668 (0.7668) Prec@1 76.172 (76.172)
Epoch: [7][10/12] Time 0.158 (0.319) Loss 0.9260 (0.8527) Prec@1 63.672 (71.768)
Test: [0/2] Time 1.675 (1.675) Loss 1.1891 (1.1891) Prec@1 61.328 (61.328)
* epoch: 7 Prec@1 58.859
* epoch: 7 Prec@1 58.859
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [8][0/12] Time 2.244 (2.244) Loss 0.8546 (0.8546) Prec@1 72.266 (72.266)
Epoch: [8][10/12] Time 0.159 (0.348) Loss 0.7712 (0.8669) Prec@1 76.562 (71.094)
Test: [0/2] Time 1.707 (1.707) Loss 0.9092 (0.9092) Prec@1 63.281 (63.281)
* epoch: 8 Prec@1 62.462
* epoch: 8 Prec@1 62.462
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [9][0/12] Time 1.925 (1.925) Loss 0.7878 (0.7878) Prec@1 78.516 (78.516)
Epoch: [9][10/12] Time 0.160 (0.325) Loss 0.7174 (0.7807) Prec@1 69.922 (73.509)
Test: [0/2] Time 1.707 (1.707) Loss 0.9544 (0.9544) Prec@1 62.891 (62.891)
* epoch: 9 Prec@1 60.661
* epoch: 9 Prec@1 60.661
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [10][0/12] Time 2.220 (2.220) Loss 0.9374 (0.9374) Prec@1 67.188 (67.188)
Epoch: [10][10/12] Time 0.159 (0.347) Loss 0.7273 (0.7483) Prec@1 80.078 (75.497)
Test: [0/2] Time 1.696 (1.696) Loss 0.9895 (0.9895) Prec@1 47.656 (47.656)
* epoch: 10 Prec@1 48.649
* epoch: 10 Prec@1 48.649
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [11][0/12] Time 1.974 (1.974) Loss 0.6920 (0.6920) Prec@1 75.391 (75.391)
Epoch: [11][10/12] Time 0.154 (0.324) Loss 0.7892 (0.6962) Prec@1 71.484 (75.781)
Test: [0/2] Time 1.665 (1.665) Loss 1.1736 (1.1736) Prec@1 78.125 (78.125)
* epoch: 11 Prec@1 77.778
* epoch: 11 Prec@1 77.778
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [12][0/12] Time 1.927 (1.927) Loss 0.7375 (0.7375) Prec@1 78.906 (78.906)
Epoch: [12][10/12] Time 0.159 (0.328) Loss 0.6453 (0.6972) Prec@1 79.688 (77.308)
Test: [0/2] Time 1.703 (1.703) Loss 0.7701 (0.7701) Prec@1 79.688 (79.688)
* epoch: 12 Prec@1 79.279
* epoch: 12 Prec@1 79.279
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [13][0/12] Time 1.983 (1.983) Loss 0.6737 (0.6737) Prec@1 79.688 (79.688)
Epoch: [13][10/12] Time 0.158 (0.325) Loss 0.6125 (0.6647) Prec@1 79.688 (78.942)
Test: [0/2] Time 1.673 (1.673) Loss 1.7411 (1.7411) Prec@1 44.922 (44.922)
* epoch: 13 Prec@1 44.745
* epoch: 13 Prec@1 44.745
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [14][0/12] Time 1.969 (1.969) Loss 0.7033 (0.7033) Prec@1 76.953 (76.953)
Epoch: [14][10/12] Time 0.160 (0.328) Loss 0.7069 (0.6706) Prec@1 78.516 (78.196)
Test: [0/2] Time 1.689 (1.689) Loss 0.7330 (0.7330) Prec@1 80.078 (80.078)
* epoch: 14 Prec@1 80.781
* epoch: 14 Prec@1 80.781
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [15][0/12] Time 1.957 (1.957) Loss 0.6477 (0.6477) Prec@1 76.562 (76.562)
Epoch: [15][10/12] Time 0.160 (0.323) Loss 0.6251 (0.6128) Prec@1 81.641 (80.007)
Test: [0/2] Time 1.675 (1.675) Loss 0.7821 (0.7821) Prec@1 76.562 (76.562)
* epoch: 15 Prec@1 76.877
* epoch: 15 Prec@1 76.877
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [16][0/12] Time 2.133 (2.133) Loss 0.6314 (0.6314) Prec@1 77.734 (77.734)
Epoch: [16][10/12] Time 0.159 (0.338) Loss 0.6333 (0.6552) Prec@1 80.469 (79.190)
Test: [0/2] Time 1.686 (1.686) Loss 0.8334 (0.8334) Prec@1 81.641 (81.641)
* epoch: 16 Prec@1 81.982
* epoch: 16 Prec@1 81.982
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [17][0/12] Time 1.903 (1.903) Loss 0.5610 (0.5610) Prec@1 79.297 (79.297)
Epoch: [17][10/12] Time 0.158 (0.324) Loss 0.5548 (0.5905) Prec@1 77.734 (81.001)
Test: [0/2] Time 1.713 (1.713) Loss 0.7781 (0.7781) Prec@1 82.422 (82.422)
* epoch: 17 Prec@1 81.381
* epoch: 17 Prec@1 81.381
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [18][0/12] Time 1.914 (1.914) Loss 0.5484 (0.5484) Prec@1 77.344 (77.344)
Epoch: [18][10/12] Time 0.159 (0.320) Loss 0.6511 (0.6283) Prec@1 79.688 (79.545)
Test: [0/2] Time 1.666 (1.666) Loss 0.8708 (0.8708) Prec@1 62.500 (62.500)
* epoch: 18 Prec@1 63.664
* epoch: 18 Prec@1 63.664
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [19][0/12] Time 1.918 (1.918) Loss 0.6416 (0.6416) Prec@1 79.297 (79.297)
Epoch: [19][10/12] Time 0.158 (0.320) Loss 0.5400 (0.5515) Prec@1 83.203 (83.097)
Test: [0/2] Time 1.694 (1.694) Loss 0.6527 (0.6527) Prec@1 81.250 (81.250)
* epoch: 19 Prec@1 81.081
* epoch: 19 Prec@1 81.081
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [20][0/12] Time 2.206 (2.206) Loss 0.5358 (0.5358) Prec@1 81.641 (81.641)
Epoch: [20][10/12] Time 0.160 (0.345) Loss 0.6041 (0.5729) Prec@1 77.734 (81.428)
Test: [0/2] Time 1.720 (1.720) Loss 0.5669 (0.5669) Prec@1 83.594 (83.594)
* epoch: 20 Prec@1 81.682
* epoch: 20 Prec@1 81.682
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [21][0/12] Time 2.203 (2.203) Loss 0.4921 (0.4921) Prec@1 82.422 (82.422)
Epoch: [21][10/12] Time 0.158 (0.345) Loss 0.4483 (0.5416) Prec@1 84.375 (82.919)
Test: [0/2] Time 1.706 (1.706) Loss 0.7614 (0.7614) Prec@1 78.516 (78.516)
* epoch: 21 Prec@1 78.679
* epoch: 21 Prec@1 78.679
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [22][0/12] Time 1.921 (1.921) Loss 0.6461 (0.6461) Prec@1 81.250 (81.250)
Epoch: [22][10/12] Time 0.158 (0.319) Loss 0.5921 (0.5490) Prec@1 82.031 (82.919)
Test: [0/2] Time 1.709 (1.709) Loss 0.6422 (0.6422) Prec@1 82.812 (82.812)
* epoch: 22 Prec@1 83.784
* epoch: 22 Prec@1 83.784
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [23][0/12] Time 1.939 (1.939) Loss 0.4573 (0.4573) Prec@1 87.500 (87.500)
Epoch: [23][10/12] Time 0.159 (0.331) Loss 0.5843 (0.5141) Prec@1 80.469 (84.624)
Test: [0/2] Time 1.685 (1.685) Loss 0.9102 (0.9102) Prec@1 75.000 (75.000)
* epoch: 23 Prec@1 76.577
* epoch: 23 Prec@1 76.577
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [24][0/12] Time 1.925 (1.925) Loss 0.7865 (0.7865) Prec@1 78.125 (78.125)
Epoch: [24][10/12] Time 0.160 (0.319) Loss 0.5571 (0.6079) Prec@1 85.938 (80.859)
Test: [0/2] Time 1.712 (1.712) Loss 0.5972 (0.5972) Prec@1 82.422 (82.422)
* epoch: 24 Prec@1 82.583
* epoch: 24 Prec@1 82.583
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [25][0/12] Time 1.958 (1.958) Loss 0.5632 (0.5632) Prec@1 83.984 (83.984)
Epoch: [25][10/12] Time 0.159 (0.322) Loss 0.3754 (0.5158) Prec@1 88.281 (83.700)
Test: [0/2] Time 1.701 (1.701) Loss 0.8734 (0.8734) Prec@1 77.734 (77.734)
* epoch: 25 Prec@1 78.378
* epoch: 25 Prec@1 78.378
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [26][0/12] Time 2.162 (2.162) Loss 0.6764 (0.6764) Prec@1 81.641 (81.641)
Epoch: [26][10/12] Time 0.160 (0.342) Loss 0.5274 (0.5528) Prec@1 80.469 (82.209)
Test: [0/2] Time 1.696 (1.696) Loss 0.6240 (0.6240) Prec@1 85.938 (85.938)
* epoch: 26 Prec@1 84.084
* epoch: 26 Prec@1 84.084
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [27][0/12] Time 2.181 (2.181) Loss 0.5241 (0.5241) Prec@1 83.203 (83.203)
Epoch: [27][10/12] Time 0.158 (0.343) Loss 0.6147 (0.5227) Prec@1 79.297 (83.736)
Test: [0/2] Time 1.708 (1.708) Loss 0.7268 (0.7268) Prec@1 80.469 (80.469)
* epoch: 27 Prec@1 79.279
* epoch: 27 Prec@1 79.279
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [28][0/12] Time 1.966 (1.966) Loss 0.4439 (0.4439) Prec@1 87.500 (87.500)
Epoch: [28][10/12] Time 0.159 (0.323) Loss 0.5766 (0.5243) Prec@1 85.547 (82.955)
Test: [0/2] Time 1.684 (1.684) Loss 0.6416 (0.6416) Prec@1 82.031 (82.031)
* epoch: 28 Prec@1 82.583
* epoch: 28 Prec@1 82.583
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [29][0/12] Time 1.943 (1.943) Loss 0.5633 (0.5633) Prec@1 79.297 (79.297)
Epoch: [29][10/12] Time 0.159 (0.321) Loss 0.4740 (0.4708) Prec@1 85.547 (84.730)
Test: [0/2] Time 1.704 (1.704) Loss 0.6950 (0.6950) Prec@1 78.125 (78.125)
* epoch: 29 Prec@1 78.378
* epoch: 29 Prec@1 78.378
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [30][0/12] Time 1.953 (1.953) Loss 0.6137 (0.6137) Prec@1 82.422 (82.422)
Epoch: [30][10/12] Time 0.159 (0.322) Loss 0.3596 (0.5028) Prec@1 87.891 (84.553)
Test: [0/2] Time 1.699 (1.699) Loss 0.6546 (0.6546) Prec@1 82.031 (82.031)
* epoch: 30 Prec@1 83.483
* epoch: 30 Prec@1 83.483
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [31][0/12] Time 2.174 (2.174) Loss 0.4846 (0.4846) Prec@1 83.984 (83.984)
Epoch: [31][10/12] Time 0.160 (0.343) Loss 0.4474 (0.4548) Prec@1 84.375 (85.050)
Test: [0/2] Time 1.678 (1.678) Loss 0.6281 (0.6281) Prec@1 84.375 (84.375)
* epoch: 31 Prec@1 84.084
* epoch: 31 Prec@1 84.084
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [32][0/12] Time 1.929 (1.929) Loss 0.4780 (0.4780) Prec@1 83.594 (83.594)
Epoch: [32][10/12] Time 0.160 (0.320) Loss 0.4670 (0.5008) Prec@1 85.938 (83.842)
Test: [0/2] Time 1.698 (1.698) Loss 0.5501 (0.5501) Prec@1 82.422 (82.422)
* epoch: 32 Prec@1 82.583
* epoch: 32 Prec@1 82.583
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [33][0/12] Time 2.224 (2.224) Loss 0.4372 (0.4372) Prec@1 89.062 (89.062)
Epoch: [33][10/12] Time 0.159 (0.347) Loss 0.4002 (0.4672) Prec@1 87.500 (85.369)
Test: [0/2] Time 1.650 (1.650) Loss 0.9937 (0.9937) Prec@1 65.234 (65.234)
* epoch: 33 Prec@1 65.165
* epoch: 33 Prec@1 65.165
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [34][0/12] Time 2.233 (2.233) Loss 0.5080 (0.5080) Prec@1 82.031 (82.031)
Epoch: [34][10/12] Time 0.159 (0.347) Loss 0.4675 (0.4632) Prec@1 84.766 (84.908)
Test: [0/2] Time 1.673 (1.673) Loss 0.6285 (0.6285) Prec@1 83.984 (83.984)
* epoch: 34 Prec@1 84.084
* epoch: 34 Prec@1 84.084
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [35][0/12] Time 1.915 (1.915) Loss 0.4910 (0.4910) Prec@1 86.328 (86.328)
Epoch: [35][10/12] Time 0.159 (0.320) Loss 0.3640 (0.4289) Prec@1 88.281 (86.222)
Test: [0/2] Time 1.725 (1.725) Loss 0.9229 (0.9229) Prec@1 80.859 (80.859)
* epoch: 35 Prec@1 80.180
* epoch: 35 Prec@1 80.180
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [36][0/12] Time 1.938 (1.938) Loss 0.5173 (0.5173) Prec@1 81.641 (81.641)
Epoch: [36][10/12] Time 0.159 (0.321) Loss 0.5336 (0.4753) Prec@1 83.203 (84.553)
Test: [0/2] Time 1.725 (1.725) Loss 0.5637 (0.5637) Prec@1 87.109 (87.109)
* epoch: 36 Prec@1 86.186
* epoch: 36 Prec@1 86.186
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [37][0/12] Time 2.224 (2.224) Loss 0.3901 (0.3901) Prec@1 86.719 (86.719)
Epoch: [37][10/12] Time 0.159 (0.347) Loss 0.4682 (0.4061) Prec@1 82.422 (86.648)
Test: [0/2] Time 1.686 (1.686) Loss 0.9415 (0.9415) Prec@1 74.219 (74.219)
* epoch: 37 Prec@1 74.474
* epoch: 37 Prec@1 74.474
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [38][0/12] Time 1.960 (1.960) Loss 0.4797 (0.4797) Prec@1 84.375 (84.375)
Epoch: [38][10/12] Time 0.159 (0.324) Loss 0.4020 (0.4410) Prec@1 88.672 (85.653)
Test: [0/2] Time 1.692 (1.692) Loss 0.4875 (0.4875) Prec@1 84.375 (84.375)
* epoch: 38 Prec@1 83.483
* epoch: 38 Prec@1 83.483
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [39][0/12] Time 1.931 (1.931) Loss 0.4579 (0.4579) Prec@1 85.156 (85.156)
Epoch: [39][10/12] Time 0.159 (0.330) Loss 0.3570 (0.3953) Prec@1 87.500 (86.754)
Test: [0/2] Time 1.673 (1.673) Loss 0.6890 (0.6890) Prec@1 84.766 (84.766)
* epoch: 39 Prec@1 83.784
* epoch: 39 Prec@1 83.784
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [40][0/12] Time 1.908 (1.908) Loss 0.5327 (0.5327) Prec@1 82.031 (82.031)
Epoch: [40][10/12] Time 0.160 (0.320) Loss 0.3981 (0.4445) Prec@1 88.281 (85.724)
Test: [0/2] Time 1.706 (1.706) Loss 0.4213 (0.4213) Prec@1 88.672 (88.672)
* epoch: 40 Prec@1 87.387
* epoch: 40 Prec@1 87.387
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [41][0/12] Time 1.924 (1.924) Loss 0.4044 (0.4044) Prec@1 85.938 (85.938)
Epoch: [41][10/12] Time 0.160 (0.318) Loss 0.4958 (0.3959) Prec@1 86.719 (86.861)
Test: [0/2] Time 1.699 (1.699) Loss 0.7511 (0.7511) Prec@1 76.172 (76.172)
* epoch: 41 Prec@1 74.174
* epoch: 41 Prec@1 74.174
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [42][0/12] Time 2.204 (2.204) Loss 0.4268 (0.4268) Prec@1 88.281 (88.281)
Epoch: [42][10/12] Time 0.158 (0.345) Loss 0.4259 (0.4605) Prec@1 87.109 (84.979)
Test: [0/2] Time 1.730 (1.730) Loss 0.6196 (0.6196) Prec@1 87.109 (87.109)
* epoch: 42 Prec@1 87.387
* epoch: 42 Prec@1 87.387
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [43][0/12] Time 2.063 (2.063) Loss 0.4617 (0.4617) Prec@1 84.375 (84.375)
Epoch: [43][10/12] Time 0.159 (0.332) Loss 0.4178 (0.3862) Prec@1 87.891 (87.287)
Test: [0/2] Time 1.692 (1.692) Loss 0.7116 (0.7116) Prec@1 78.516 (78.516)
* epoch: 43 Prec@1 78.378
* epoch: 43 Prec@1 78.378
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [44][0/12] Time 1.968 (1.968) Loss 0.6055 (0.6055) Prec@1 76.953 (76.953)
Epoch: [44][10/12] Time 0.160 (0.331) Loss 0.4583 (0.4649) Prec@1 85.156 (83.487)
Test: [0/2] Time 1.690 (1.690) Loss 0.4601 (0.4601) Prec@1 87.109 (87.109)
* epoch: 44 Prec@1 86.787
* epoch: 44 Prec@1 86.787
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [45][0/12] Time 1.908 (1.908) Loss 0.4238 (0.4238) Prec@1 87.109 (87.109)
Epoch: [45][10/12] Time 0.159 (0.318) Loss 0.5030 (0.3826) Prec@1 82.812 (87.074)
Test: [0/2] Time 1.707 (1.707) Loss 0.7285 (0.7285) Prec@1 79.688 (79.688)
* epoch: 45 Prec@1 80.781
* epoch: 45 Prec@1 80.781
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [46][0/12] Time 1.955 (1.955) Loss 0.5765 (0.5765) Prec@1 81.250 (81.250)
Epoch: [46][10/12] Time 0.159 (0.321) Loss 0.3067 (0.4130) Prec@1 88.672 (86.009)
Test: [0/2] Time 1.686 (1.686) Loss 0.4962 (0.4962) Prec@1 89.062 (89.062)
* epoch: 46 Prec@1 88.288
* epoch: 46 Prec@1 88.288
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [47][0/12] Time 1.986 (1.986) Loss 0.3760 (0.3760) Prec@1 87.891 (87.891)
Epoch: [47][10/12] Time 0.159 (0.335) Loss 0.3775 (0.3699) Prec@1 84.766 (87.216)
Test: [0/2] Time 1.697 (1.697) Loss 0.5537 (0.5537) Prec@1 85.938 (85.938)
* epoch: 47 Prec@1 85.886
* epoch: 47 Prec@1 85.886
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [48][0/12] Time 1.951 (1.951) Loss 0.3737 (0.3737) Prec@1 87.500 (87.500)
Epoch: [48][10/12] Time 0.159 (0.322) Loss 0.3257 (0.3962) Prec@1 88.672 (87.145)
Test: [0/2] Time 1.693 (1.693) Loss 0.5342 (0.5342) Prec@1 87.109 (87.109)
* epoch: 48 Prec@1 85.586
* epoch: 48 Prec@1 85.586
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [49][0/12] Time 1.991 (1.991) Loss 0.3158 (0.3158) Prec@1 88.672 (88.672)
Epoch: [49][10/12] Time 0.159 (0.326) Loss 0.4068 (0.3745) Prec@1 87.109 (87.571)
Test: [0/2] Time 1.684 (1.684) Loss 0.5855 (0.5855) Prec@1 81.641 (81.641)
* epoch: 49 Prec@1 80.781
* epoch: 49 Prec@1 80.781
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [50][0/12] Time 1.904 (1.904) Loss 0.6005 (0.6005) Prec@1 79.297 (79.297)
Epoch: [50][10/12] Time 0.159 (0.319) Loss 0.3856 (0.3787) Prec@1 86.328 (87.464)
Test: [0/2] Time 1.680 (1.680) Loss 0.6797 (0.6797) Prec@1 77.344 (77.344)
* epoch: 50 Prec@1 78.979
* epoch: 50 Prec@1 78.979
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [51][0/12] Time 2.151 (2.151) Loss 0.3381 (0.3381) Prec@1 89.844 (89.844)
Epoch: [51][10/12] Time 0.157 (0.340) Loss 0.3905 (0.3525) Prec@1 83.594 (88.352)
Test: [0/2] Time 1.705 (1.705) Loss 0.5561 (0.5561) Prec@1 86.328 (86.328)
* epoch: 51 Prec@1 85.886
* epoch: 51 Prec@1 85.886
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [52][0/12] Time 1.977 (1.977) Loss 0.3419 (0.3419) Prec@1 88.281 (88.281)
Epoch: [52][10/12] Time 0.159 (0.325) Loss 0.3898 (0.3460) Prec@1 88.281 (88.565)
Test: [0/2] Time 1.704 (1.704) Loss 0.4699 (0.4699) Prec@1 86.328 (86.328)
* epoch: 52 Prec@1 85.285
* epoch: 52 Prec@1 85.285
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [53][0/12] Time 1.954 (1.954) Loss 0.2477 (0.2477) Prec@1 88.672 (88.672)
Epoch: [53][10/12] Time 0.159 (0.330) Loss 0.3368 (0.3348) Prec@1 88.672 (88.920)
Test: [0/2] Time 1.672 (1.672) Loss 0.9721 (0.9721) Prec@1 60.547 (60.547)
* epoch: 53 Prec@1 61.261
* epoch: 53 Prec@1 61.261
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [54][0/12] Time 2.249 (2.249) Loss 0.4255 (0.4255) Prec@1 82.031 (82.031)
Epoch: [54][10/12] Time 0.160 (0.349) Loss 0.3790 (0.3706) Prec@1 85.547 (86.506)
Test: [0/2] Time 1.664 (1.664) Loss 0.4914 (0.4914) Prec@1 88.281 (88.281)
* epoch: 54 Prec@1 88.889
* epoch: 54 Prec@1 88.889
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [55][0/12] Time 1.919 (1.919) Loss 0.3694 (0.3694) Prec@1 85.547 (85.547)
Epoch: [55][10/12] Time 0.159 (0.319) Loss 0.3774 (0.3204) Prec@1 88.672 (89.240)
Test: [0/2] Time 1.689 (1.689) Loss 0.7353 (0.7353) Prec@1 80.078 (80.078)
* epoch: 55 Prec@1 79.880
* epoch: 55 Prec@1 79.880
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [56][0/12] Time 1.947 (1.947) Loss 0.4603 (0.4603) Prec@1 83.594 (83.594)
Epoch: [56][10/12] Time 0.159 (0.322) Loss 0.3666 (0.3867) Prec@1 87.891 (86.932)
Test: [0/2] Time 1.722 (1.722) Loss 0.4614 (0.4614) Prec@1 86.719 (86.719)
* epoch: 56 Prec@1 86.486
* epoch: 56 Prec@1 86.486
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [57][0/12] Time 1.980 (1.980) Loss 0.3089 (0.3089) Prec@1 89.844 (89.844)
Epoch: [57][10/12] Time 0.160 (0.323) Loss 0.2758 (0.3167) Prec@1 90.234 (89.027)
Test: [0/2] Time 1.713 (1.713) Loss 0.7926 (0.7926) Prec@1 69.141 (69.141)
* epoch: 57 Prec@1 68.769
* epoch: 57 Prec@1 68.769
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [58][0/12] Time 1.897 (1.897) Loss 0.3536 (0.3536) Prec@1 86.328 (86.328)
Epoch: [58][10/12] Time 0.160 (0.318) Loss 0.3304 (0.3711) Prec@1 91.406 (87.109)
Test: [0/2] Time 1.722 (1.722) Loss 0.4612 (0.4612) Prec@1 89.062 (89.062)
* epoch: 58 Prec@1 89.790
* epoch: 58 Prec@1 89.790
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [59][0/12] Time 2.226 (2.226) Loss 0.2936 (0.2936) Prec@1 91.406 (91.406)
Epoch: [59][10/12] Time 0.159 (0.348) Loss 0.3097 (0.3106) Prec@1 87.500 (89.560)
Test: [0/2] Time 1.691 (1.691) Loss 0.5900 (0.5900) Prec@1 83.984 (83.984)
* epoch: 59 Prec@1 85.285
* epoch: 59 Prec@1 85.285
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [60][0/12] Time 2.012 (2.012) Loss 0.2896 (0.2896) Prec@1 90.234 (90.234)
Epoch: [60][10/12] Time 0.158 (0.326) Loss 0.3392 (0.3331) Prec@1 87.891 (88.246)
Test: [0/2] Time 1.725 (1.725) Loss 0.5262 (0.5262) Prec@1 89.453 (89.453)
* epoch: 60 Prec@1 88.889
* epoch: 60 Prec@1 88.889
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [61][0/12] Time 1.936 (1.936) Loss 0.3321 (0.3321) Prec@1 88.281 (88.281)
Epoch: [61][10/12] Time 0.159 (0.320) Loss 0.3931 (0.3128) Prec@1 87.891 (89.347)
Test: [0/2] Time 1.672 (1.672) Loss 0.8900 (0.8900) Prec@1 57.812 (57.812)
* epoch: 61 Prec@1 58.258
* epoch: 61 Prec@1 58.258
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [62][0/12] Time 1.955 (1.955) Loss 0.3191 (0.3191) Prec@1 89.844 (89.844)
Epoch: [62][10/12] Time 0.154 (0.322) Loss 0.3570 (0.3385) Prec@1 88.281 (88.636)
Test: [0/2] Time 1.709 (1.709) Loss 0.4341 (0.4341) Prec@1 86.719 (86.719)
* epoch: 62 Prec@1 86.486
* epoch: 62 Prec@1 86.486
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [63][0/12] Time 1.930 (1.930) Loss 0.2078 (0.2078) Prec@1 92.188 (92.188)
Epoch: [63][10/12] Time 0.159 (0.320) Loss 0.3926 (0.2808) Prec@1 88.281 (90.376)
Test: [0/2] Time 1.681 (1.681) Loss 0.5782 (0.5782) Prec@1 71.484 (71.484)
* epoch: 63 Prec@1 70.270
* epoch: 63 Prec@1 70.270
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [64][0/12] Time 2.252 (2.252) Loss 0.3744 (0.3744) Prec@1 87.891 (87.891)
Epoch: [64][10/12] Time 0.158 (0.349) Loss 0.3298 (0.3217) Prec@1 88.672 (89.276)
Test: [0/2] Time 1.717 (1.717) Loss 0.4983 (0.4983) Prec@1 89.062 (89.062)
* epoch: 64 Prec@1 89.489
* epoch: 64 Prec@1 89.489
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [65][0/12] Time 1.913 (1.913) Loss 0.3845 (0.3845) Prec@1 87.891 (87.891)
Epoch: [65][10/12] Time 0.161 (0.332) Loss 0.2949 (0.3003) Prec@1 89.062 (89.950)
Test: [0/2] Time 1.693 (1.693) Loss 0.5655 (0.5655) Prec@1 86.328 (86.328)
* epoch: 65 Prec@1 87.087
* epoch: 65 Prec@1 87.087
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [66][0/12] Time 1.949 (1.949) Loss 0.4444 (0.4444) Prec@1 87.109 (87.109)
Epoch: [66][10/12] Time 0.159 (0.322) Loss 0.3399 (0.3130) Prec@1 89.062 (89.098)
Test: [0/2] Time 1.703 (1.703) Loss 0.5085 (0.5085) Prec@1 89.844 (89.844)
* epoch: 66 Prec@1 87.688
* epoch: 66 Prec@1 87.688
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [67][0/12] Time 1.946 (1.946) Loss 0.2375 (0.2375) Prec@1 91.016 (91.016)
Epoch: [67][10/12] Time 0.159 (0.328) Loss 0.3387 (0.2769) Prec@1 85.547 (90.128)
Test: [0/2] Time 1.703 (1.703) Loss 0.5086 (0.5086) Prec@1 88.281 (88.281)
* epoch: 67 Prec@1 87.387
* epoch: 67 Prec@1 87.387
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [68][0/12] Time 1.972 (1.972) Loss 0.3379 (0.3379) Prec@1 90.234 (90.234)
Epoch: [68][10/12] Time 0.159 (0.324) Loss 0.2454 (0.3145) Prec@1 91.406 (88.672)
Test: [0/2] Time 1.719 (1.719) Loss 0.6045 (0.6045) Prec@1 87.500 (87.500)
* epoch: 68 Prec@1 88.589
* epoch: 68 Prec@1 88.589
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [69][0/12] Time 1.947 (1.947) Loss 0.2363 (0.2363) Prec@1 90.625 (90.625)
Epoch: [69][10/12] Time 0.159 (0.321) Loss 0.2486 (0.2593) Prec@1 90.234 (90.874)
Test: [0/2] Time 1.710 (1.710) Loss 0.5298 (0.5298) Prec@1 80.078 (80.078)
* epoch: 69 Prec@1 81.381
* epoch: 69 Prec@1 81.381
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [70][0/12] Time 1.949 (1.949) Loss 0.2916 (0.2916) Prec@1 89.453 (89.453)
Epoch: [70][10/12] Time 0.158 (0.328) Loss 0.2786 (0.2966) Prec@1 89.453 (89.737)
Test: [0/2] Time 1.683 (1.683) Loss 0.4976 (0.4976) Prec@1 87.500 (87.500)
* epoch: 70 Prec@1 88.288
* epoch: 70 Prec@1 88.288
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [71][0/12] Time 1.928 (1.928) Loss 0.1949 (0.1949) Prec@1 94.141 (94.141)
Epoch: [71][10/12] Time 0.159 (0.330) Loss 0.1884 (0.2445) Prec@1 93.359 (90.696)
Test: [0/2] Time 1.684 (1.684) Loss 0.5251 (0.5251) Prec@1 82.812 (82.812)
* epoch: 71 Prec@1 81.081
* epoch: 71 Prec@1 81.081
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [72][0/12] Time 1.930 (1.930) Loss 0.3282 (0.3282) Prec@1 87.500 (87.500)
Epoch: [72][10/12] Time 0.159 (0.320) Loss 0.3842 (0.3169) Prec@1 86.719 (89.666)
Test: [0/2] Time 1.709 (1.709) Loss 0.6996 (0.6996) Prec@1 89.453 (89.453)
* epoch: 72 Prec@1 90.390
* epoch: 72 Prec@1 90.390
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [73][0/12] Time 2.187 (2.187) Loss 0.3168 (0.3168) Prec@1 87.500 (87.500)
Epoch: [73][10/12] Time 0.159 (0.343) Loss 0.3492 (0.2848) Prec@1 85.938 (89.986)
Test: [0/2] Time 1.708 (1.708) Loss 0.9264 (0.9264) Prec@1 85.547 (85.547)
* epoch: 73 Prec@1 86.787
* epoch: 73 Prec@1 86.787
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [74][0/12] Time 1.913 (1.913) Loss 0.3152 (0.3152) Prec@1 88.281 (88.281)
Epoch: [74][10/12] Time 0.159 (0.319) Loss 0.2675 (0.2727) Prec@1 92.578 (90.518)
Test: [0/2] Time 1.720 (1.720) Loss 0.5490 (0.5490) Prec@1 86.328 (86.328)
* epoch: 74 Prec@1 86.186
* epoch: 74 Prec@1 86.186
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [75][0/12] Time 1.922 (1.922) Loss 0.2301 (0.2301) Prec@1 90.625 (90.625)
Epoch: [75][10/12] Time 0.159 (0.320) Loss 0.3265 (0.2525) Prec@1 87.109 (91.229)
Test: [0/2] Time 1.699 (1.699) Loss 0.6857 (0.6857) Prec@1 82.812 (82.812)
* epoch: 75 Prec@1 82.282
* epoch: 75 Prec@1 82.282
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [76][0/12] Time 2.240 (2.240) Loss 0.2848 (0.2848) Prec@1 87.500 (87.500)
Epoch: [76][10/12] Time 0.160 (0.348) Loss 0.3408 (0.3049) Prec@1 87.500 (89.560)
Test: [0/2] Time 1.707 (1.707) Loss 0.5952 (0.5952) Prec@1 81.641 (81.641)
* epoch: 76 Prec@1 81.682
* epoch: 76 Prec@1 81.682
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [77][0/12] Time 1.904 (1.904) Loss 0.2558 (0.2558) Prec@1 92.578 (92.578)
Epoch: [77][10/12] Time 0.159 (0.324) Loss 0.3256 (0.2529) Prec@1 87.500 (91.229)
Test: [0/2] Time 1.708 (1.708) Loss 0.5739 (0.5739) Prec@1 85.156 (85.156)
* epoch: 77 Prec@1 82.883
* epoch: 77 Prec@1 82.883
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [78][0/12] Time 2.032 (2.032) Loss 0.2934 (0.2934) Prec@1 89.844 (89.844)
Epoch: [78][10/12] Time 0.159 (0.330) Loss 0.2887 (0.2670) Prec@1 92.578 (91.300)
Test: [0/2] Time 1.688 (1.688) Loss 0.5590 (0.5590) Prec@1 85.156 (85.156)
* epoch: 78 Prec@1 86.186
* epoch: 78 Prec@1 86.186
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [79][0/12] Time 1.945 (1.945) Loss 0.2431 (0.2431) Prec@1 91.797 (91.797)
Epoch: [79][10/12] Time 0.160 (0.332) Loss 0.3305 (0.2164) Prec@1 87.109 (92.543)
Test: [0/2] Time 1.675 (1.675) Loss 0.7119 (0.7119) Prec@1 77.344 (77.344)
* epoch: 79 Prec@1 76.877
* epoch: 79 Prec@1 76.877
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [80][0/12] Time 2.189 (2.189) Loss 0.2332 (0.2332) Prec@1 92.969 (92.969)
Epoch: [80][10/12] Time 0.159 (0.343) Loss 0.2309 (0.2770) Prec@1 93.359 (91.335)
Test: [0/2] Time 1.680 (1.680) Loss 0.3864 (0.3864) Prec@1 90.234 (90.234)
* epoch: 80 Prec@1 87.688
* epoch: 80 Prec@1 87.688
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [81][0/12] Time 2.239 (2.239) Loss 0.2767 (0.2767) Prec@1 92.578 (92.578)
Epoch: [81][10/12] Time 0.160 (0.348) Loss 0.3311 (0.2484) Prec@1 86.328 (91.335)
Test: [0/2] Time 1.720 (1.720) Loss 0.6348 (0.6348) Prec@1 76.562 (76.562)
* epoch: 81 Prec@1 76.276
* epoch: 81 Prec@1 76.276
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [82][0/12] Time 2.256 (2.256) Loss 0.1899 (0.1899) Prec@1 92.969 (92.969)
Epoch: [82][10/12] Time 0.159 (0.349) Loss 0.2917 (0.2652) Prec@1 90.625 (91.442)
Test: [0/2] Time 1.689 (1.689) Loss 0.5168 (0.5168) Prec@1 85.156 (85.156)
* epoch: 82 Prec@1 85.886
* epoch: 82 Prec@1 85.886
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [83][0/12] Time 2.051 (2.051) Loss 0.1982 (0.1982) Prec@1 92.188 (92.188)
Epoch: [83][10/12] Time 0.159 (0.332) Loss 0.2491 (0.2078) Prec@1 89.844 (92.543)
Test: [0/2] Time 1.682 (1.682) Loss 0.6296 (0.6296) Prec@1 80.078 (80.078)
* epoch: 83 Prec@1 77.177
* epoch: 83 Prec@1 77.177
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [84][0/12] Time 1.912 (1.912) Loss 0.1927 (0.1927) Prec@1 93.359 (93.359)
Epoch: [84][10/12] Time 0.157 (0.318) Loss 0.1888 (0.2599) Prec@1 93.359 (91.406)
Test: [0/2] Time 1.707 (1.707) Loss 0.5512 (0.5512) Prec@1 88.672 (88.672)
* epoch: 84 Prec@1 89.790
* epoch: 84 Prec@1 89.790
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [85][0/12] Time 1.909 (1.909) Loss 0.1752 (0.1752) Prec@1 93.750 (93.750)
Epoch: [85][10/12] Time 0.161 (0.319) Loss 0.2747 (0.2133) Prec@1 91.797 (92.614)
Test: [0/2] Time 1.704 (1.704) Loss 0.5671 (0.5671) Prec@1 86.328 (86.328)
* epoch: 85 Prec@1 87.387
* epoch: 85 Prec@1 87.387
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [86][0/12] Time 2.240 (2.240) Loss 0.2070 (0.2070) Prec@1 89.844 (89.844)
Epoch: [86][10/12] Time 0.160 (0.348) Loss 0.2832 (0.2450) Prec@1 91.797 (91.513)
Test: [0/2] Time 1.709 (1.709) Loss 0.5368 (0.5368) Prec@1 86.719 (86.719)
* epoch: 86 Prec@1 85.586
* epoch: 86 Prec@1 85.586
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [87][0/12] Time 1.953 (1.953) Loss 0.1469 (0.1469) Prec@1 96.094 (96.094)
Epoch: [87][10/12] Time 0.161 (0.329) Loss 0.1976 (0.2014) Prec@1 92.969 (93.466)
Test: [0/2] Time 1.704 (1.704) Loss 0.6268 (0.6268) Prec@1 86.328 (86.328)
* epoch: 87 Prec@1 88.288
* epoch: 87 Prec@1 88.288
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [88][0/12] Time 1.976 (1.976) Loss 0.1892 (0.1892) Prec@1 92.969 (92.969)
Epoch: [88][10/12] Time 0.159 (0.323) Loss 0.1663 (0.2332) Prec@1 93.359 (91.442)
Test: [0/2] Time 1.725 (1.725) Loss 0.4768 (0.4768) Prec@1 89.844 (89.844)
* epoch: 88 Prec@1 89.489
* epoch: 88 Prec@1 89.489
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [89][0/12] Time 2.208 (2.208) Loss 0.1286 (0.1286) Prec@1 94.141 (94.141)
Epoch: [89][10/12] Time 0.159 (0.345) Loss 0.1622 (0.1895) Prec@1 93.750 (92.862)
Test: [0/2] Time 1.670 (1.670) Loss 0.7504 (0.7504) Prec@1 86.719 (86.719)
* epoch: 89 Prec@1 88.288
* epoch: 89 Prec@1 88.288
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [90][0/12] Time 1.919 (1.919) Loss 0.1749 (0.1749) Prec@1 94.531 (94.531)
Epoch: [90][10/12] Time 0.160 (0.319) Loss 0.3030 (0.2244) Prec@1 90.234 (92.472)
Test: [0/2] Time 1.703 (1.703) Loss 0.6520 (0.6520) Prec@1 87.500 (87.500)
* epoch: 90 Prec@1 89.489
* epoch: 90 Prec@1 89.489
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [91][0/12] Time 2.019 (2.019) Loss 0.1967 (0.1967) Prec@1 93.750 (93.750)
Epoch: [91][10/12] Time 0.160 (0.328) Loss 0.2092 (0.1974) Prec@1 91.406 (93.146)
Test: [0/2] Time 1.717 (1.717) Loss 0.5337 (0.5337) Prec@1 89.453 (89.453)
* epoch: 91 Prec@1 89.489
* epoch: 91 Prec@1 89.489
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [92][0/12] Time 1.908 (1.908) Loss 0.1796 (0.1796) Prec@1 93.750 (93.750)
Epoch: [92][10/12] Time 0.160 (0.320) Loss 0.2263 (0.2480) Prec@1 93.359 (91.868)
Test: [0/2] Time 1.715 (1.715) Loss 0.4933 (0.4933) Prec@1 88.672 (88.672)
* epoch: 92 Prec@1 89.189
* epoch: 92 Prec@1 89.189
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [93][0/12] Time 1.934 (1.934) Loss 0.1817 (0.1817) Prec@1 93.750 (93.750)
Epoch: [93][10/12] Time 0.158 (0.320) Loss 0.2488 (0.2133) Prec@1 90.625 (92.330)
Test: [0/2] Time 1.699 (1.699) Loss 0.7086 (0.7086) Prec@1 80.469 (80.469)
* epoch: 93 Prec@1 79.279
* epoch: 93 Prec@1 79.279
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [94][0/12] Time 1.955 (1.955) Loss 0.2422 (0.2422) Prec@1 89.453 (89.453)
Epoch: [94][10/12] Time 0.159 (0.322) Loss 0.1780 (0.2533) Prec@1 93.359 (90.518)
Test: [0/2] Time 1.699 (1.699) Loss 0.6035 (0.6035) Prec@1 89.062 (89.062)
* epoch: 94 Prec@1 89.189
* epoch: 94 Prec@1 89.189
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [95][0/12] Time 1.975 (1.975) Loss 0.2582 (0.2582) Prec@1 90.234 (90.234)
Epoch: [95][10/12] Time 0.159 (0.323) Loss 0.2944 (0.2084) Prec@1 92.188 (92.791)
Test: [0/2] Time 1.730 (1.730) Loss 1.0666 (1.0666) Prec@1 67.188 (67.188)
* epoch: 95 Prec@1 66.967
* epoch: 95 Prec@1 66.967
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [96][0/12] Time 1.966 (1.966) Loss 0.1643 (0.1643) Prec@1 94.141 (94.141)
Epoch: [96][10/12] Time 0.160 (0.331) Loss 0.2444 (0.2336) Prec@1 91.016 (92.294)
Test: [0/2] Time 1.694 (1.694) Loss 0.5861 (0.5861) Prec@1 86.328 (86.328)
* epoch: 96 Prec@1 86.486
* epoch: 96 Prec@1 86.486
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [97][0/12] Time 1.914 (1.914) Loss 0.1659 (0.1659) Prec@1 94.531 (94.531)
Epoch: [97][10/12] Time 0.160 (0.330) Loss 0.2504 (0.1904) Prec@1 92.578 (93.466)
Test: [0/2] Time 1.678 (1.678) Loss 0.6894 (0.6894) Prec@1 78.906 (78.906)
* epoch: 97 Prec@1 80.480
* epoch: 97 Prec@1 80.480
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [98][0/12] Time 1.902 (1.902) Loss 0.2155 (0.2155) Prec@1 91.406 (91.406)
Epoch: [98][10/12] Time 0.159 (0.317) Loss 0.2941 (0.2129) Prec@1 92.578 (92.401)
Test: [0/2] Time 1.687 (1.687) Loss 0.3753 (0.3753) Prec@1 94.922 (94.922)
* epoch: 98 Prec@1 93.093
* epoch: 98 Prec@1 93.093
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [99][0/12] Time 1.923 (1.923) Loss 0.1940 (0.1940) Prec@1 95.312 (95.312)
Epoch: [99][10/12] Time 0.160 (0.321) Loss 0.1865 (0.1930) Prec@1 93.750 (93.537)
Test: [0/2] Time 1.672 (1.672) Loss 0.5767 (0.5767) Prec@1 83.594 (83.594)
* epoch: 99 Prec@1 83.483
* epoch: 99 Prec@1 83.483
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [100][0/12] Time 1.946 (1.946) Loss 0.3028 (0.3028) Prec@1 91.016 (91.016)
Epoch: [100][10/12] Time 0.159 (0.323) Loss 0.2235 (0.2103) Prec@1 91.406 (92.969)
Test: [0/2] Time 1.704 (1.704) Loss 0.5625 (0.5625) Prec@1 89.844 (89.844)
* epoch: 100 Prec@1 88.889
* epoch: 100 Prec@1 88.889
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [101][0/12] Time 1.920 (1.920) Loss 0.3380 (0.3380) Prec@1 89.844 (89.844)
Epoch: [101][10/12] Time 0.159 (0.320) Loss 0.1733 (0.1909) Prec@1 92.188 (93.679)
Test: [0/2] Time 1.678 (1.678) Loss 0.6445 (0.6445) Prec@1 89.062 (89.062)
* epoch: 101 Prec@1 88.589
* epoch: 101 Prec@1 88.589
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [102][0/12] Time 1.895 (1.895) Loss 0.2093 (0.2093) Prec@1 92.969 (92.969)
Epoch: [102][10/12] Time 0.159 (0.317) Loss 0.2647 (0.2172) Prec@1 89.844 (92.791)
Test: [0/2] Time 1.691 (1.691) Loss 0.4537 (0.4537) Prec@1 89.844 (89.844)
* epoch: 102 Prec@1 90.390
* epoch: 102 Prec@1 90.390
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [103][0/12] Time 1.975 (1.975) Loss 0.1979 (0.1979) Prec@1 92.969 (92.969)
Epoch: [103][10/12] Time 0.159 (0.324) Loss 0.2140 (0.1836) Prec@1 89.844 (93.217)
Test: [0/2] Time 1.706 (1.706) Loss 0.7860 (0.7860) Prec@1 89.062 (89.062)
* epoch: 103 Prec@1 90.090
* epoch: 103 Prec@1 90.090
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [104][0/12] Time 2.022 (2.022) Loss 0.2810 (0.2810) Prec@1 92.578 (92.578)
Epoch: [104][10/12] Time 0.159 (0.330) Loss 0.2480 (0.2219) Prec@1 91.797 (92.081)
Test: [0/2] Time 1.716 (1.716) Loss 0.6215 (0.6215) Prec@1 90.625 (90.625)
* epoch: 104 Prec@1 91.592
* epoch: 104 Prec@1 91.592
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [105][0/12] Time 1.906 (1.906) Loss 0.1642 (0.1642) Prec@1 95.312 (95.312)
Epoch: [105][10/12] Time 0.159 (0.319) Loss 0.1640 (0.1851) Prec@1 93.750 (93.928)
Test: [0/2] Time 1.708 (1.708) Loss 0.5004 (0.5004) Prec@1 90.625 (90.625)
* epoch: 105 Prec@1 90.390
* epoch: 105 Prec@1 90.390
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [106][0/12] Time 1.909 (1.909) Loss 0.1803 (0.1803) Prec@1 90.234 (90.234)
Epoch: [106][10/12] Time 0.158 (0.319) Loss 0.1994 (0.2141) Prec@1 91.797 (92.223)
Test: [0/2] Time 1.688 (1.688) Loss 0.4920 (0.4920) Prec@1 82.031 (82.031)
* epoch: 106 Prec@1 79.580
* epoch: 106 Prec@1 79.580
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [107][0/12] Time 1.971 (1.971) Loss 0.1579 (0.1579) Prec@1 92.969 (92.969)
Epoch: [107][10/12] Time 0.160 (0.330) Loss 0.1896 (0.1695) Prec@1 93.359 (93.786)
Test: [0/2] Time 1.693 (1.693) Loss 0.6627 (0.6627) Prec@1 85.156 (85.156)
* epoch: 107 Prec@1 85.285
* epoch: 107 Prec@1 85.285
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [108][0/12] Time 1.926 (1.926) Loss 0.2721 (0.2721) Prec@1 90.234 (90.234)
Epoch: [108][10/12] Time 0.159 (0.334) Loss 0.1391 (0.1956) Prec@1 94.531 (93.395)
Test: [0/2] Time 1.693 (1.693) Loss 0.6169 (0.6169) Prec@1 87.891 (87.891)
* epoch: 108 Prec@1 88.889
* epoch: 108 Prec@1 88.889
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [109][0/12] Time 1.932 (1.932) Loss 0.1255 (0.1255) Prec@1 96.094 (96.094)
Epoch: [109][10/12] Time 0.159 (0.318) Loss 0.1880 (0.1642) Prec@1 93.750 (94.567)
Test: [0/2] Time 1.702 (1.702) Loss 0.6942 (0.6942) Prec@1 86.719 (86.719)
* epoch: 109 Prec@1 88.288
* epoch: 109 Prec@1 88.288
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [110][0/12] Time 1.941 (1.941) Loss 0.1494 (0.1494) Prec@1 95.312 (95.312)
Epoch: [110][10/12] Time 0.159 (0.320) Loss 0.2451 (0.2071) Prec@1 91.016 (92.330)
Test: [0/2] Time 1.721 (1.721) Loss 0.5960 (0.5960) Prec@1 86.328 (86.328)
* epoch: 110 Prec@1 86.486
* epoch: 110 Prec@1 86.486
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [111][0/12] Time 1.969 (1.969) Loss 0.1588 (0.1588) Prec@1 94.141 (94.141)
Epoch: [111][10/12] Time 0.160 (0.323) Loss 0.1406 (0.1750) Prec@1 96.094 (93.395)
Test: [0/2] Time 1.680 (1.680) Loss 0.6977 (0.6977) Prec@1 90.625 (90.625)
* epoch: 111 Prec@1 90.090
* epoch: 111 Prec@1 90.090
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [112][0/12] Time 1.914 (1.914) Loss 0.2185 (0.2185) Prec@1 92.578 (92.578)
Epoch: [112][10/12] Time 0.161 (0.318) Loss 0.1134 (0.1589) Prec@1 94.922 (94.389)
Test: [0/2] Time 1.692 (1.692) Loss 0.6469 (0.6469) Prec@1 89.062 (89.062)
* epoch: 112 Prec@1 89.489
* epoch: 112 Prec@1 89.489
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [113][0/12] Time 1.978 (1.978) Loss 0.1283 (0.1283) Prec@1 95.703 (95.703)
Epoch: [113][10/12] Time 0.158 (0.323) Loss 0.2125 (0.1576) Prec@1 93.359 (94.815)
Test: [0/2] Time 1.699 (1.699) Loss 0.9452 (0.9452) Prec@1 67.578 (67.578)
* epoch: 113 Prec@1 65.766
* epoch: 113 Prec@1 65.766
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [114][0/12] Time 1.950 (1.950) Loss 0.2040 (0.2040) Prec@1 92.188 (92.188)
Epoch: [114][10/12] Time 0.159 (0.322) Loss 0.2110 (0.1899) Prec@1 94.531 (93.253)
Test: [0/2] Time 1.687 (1.687) Loss 0.5138 (0.5138) Prec@1 86.719 (86.719)
* epoch: 114 Prec@1 86.486
* epoch: 114 Prec@1 86.486
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [115][0/12] Time 1.998 (1.998) Loss 0.1638 (0.1638) Prec@1 95.703 (95.703)
Epoch: [115][10/12] Time 0.160 (0.331) Loss 0.2106 (0.1672) Prec@1 92.578 (94.354)
Test: [0/2] Time 1.709 (1.709) Loss 0.8591 (0.8591) Prec@1 89.062 (89.062)
* epoch: 115 Prec@1 89.790
* epoch: 115 Prec@1 89.790
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [116][0/12] Time 1.976 (1.976) Loss 0.1392 (0.1392) Prec@1 92.969 (92.969)
Epoch: [116][10/12] Time 0.160 (0.336) Loss 0.2718 (0.2105) Prec@1 92.578 (93.253)
Test: [0/2] Time 1.718 (1.718) Loss 0.4857 (0.4857) Prec@1 88.281 (88.281)
* epoch: 116 Prec@1 88.589
* epoch: 116 Prec@1 88.589
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [117][0/12] Time 2.203 (2.203) Loss 0.1616 (0.1616) Prec@1 92.969 (92.969)
Epoch: [117][10/12] Time 0.159 (0.345) Loss 0.1600 (0.1427) Prec@1 94.141 (94.638)
Test: [0/2] Time 1.732 (1.732) Loss 0.8432 (0.8432) Prec@1 89.453 (89.453)
* epoch: 117 Prec@1 90.691
* epoch: 117 Prec@1 90.691
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [118][0/12] Time 1.918 (1.918) Loss 0.1563 (0.1563) Prec@1 94.141 (94.141)
Epoch: [118][10/12] Time 0.160 (0.319) Loss 0.1545 (0.1740) Prec@1 93.750 (93.892)
Test: [0/2] Time 1.722 (1.722) Loss 0.4324 (0.4324) Prec@1 91.797 (91.797)
* epoch: 118 Prec@1 90.991
* epoch: 118 Prec@1 90.991
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [119][0/12] Time 1.912 (1.912) Loss 0.1632 (0.1632) Prec@1 92.578 (92.578)
Epoch: [119][10/12] Time 0.160 (0.317) Loss 0.1550 (0.1470) Prec@1 94.922 (94.638)
Test: [0/2] Time 1.743 (1.743) Loss 0.5448 (0.5448) Prec@1 86.719 (86.719)
* epoch: 119 Prec@1 85.886
* epoch: 119 Prec@1 85.886
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [120][0/12] Time 1.956 (1.956) Loss 0.1617 (0.1617) Prec@1 95.703 (95.703)
Epoch: [120][10/12] Time 0.159 (0.322) Loss 0.1568 (0.1884) Prec@1 94.531 (93.466)
Test: [0/2] Time 1.724 (1.724) Loss 0.4884 (0.4884) Prec@1 89.062 (89.062)
* epoch: 120 Prec@1 89.189
* epoch: 120 Prec@1 89.189
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [121][0/12] Time 2.252 (2.252) Loss 0.0956 (0.0956) Prec@1 96.484 (96.484)
Epoch: [121][10/12] Time 0.160 (0.350) Loss 0.0892 (0.1378) Prec@1 96.094 (94.425)
Test: [0/2] Time 1.702 (1.702) Loss 0.9220 (0.9220) Prec@1 71.875 (71.875)
* epoch: 121 Prec@1 72.372
* epoch: 121 Prec@1 72.372
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [122][0/12] Time 2.193 (2.193) Loss 0.1376 (0.1376) Prec@1 93.359 (93.359)
Epoch: [122][10/12] Time 0.154 (0.344) Loss 0.1217 (0.1669) Prec@1 95.312 (94.034)
Test: [0/2] Time 1.728 (1.728) Loss 0.4749 (0.4749) Prec@1 91.406 (91.406)
* epoch: 122 Prec@1 90.090
* epoch: 122 Prec@1 90.090
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [123][0/12] Time 1.969 (1.969) Loss 0.1731 (0.1731) Prec@1 93.359 (93.359)
Epoch: [123][10/12] Time 0.160 (0.332) Loss 0.1657 (0.1350) Prec@1 95.703 (95.419)
Test: [0/2] Time 1.702 (1.702) Loss 0.4422 (0.4422) Prec@1 92.188 (92.188)
* epoch: 123 Prec@1 90.991
* epoch: 123 Prec@1 90.991
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [124][0/12] Time 1.910 (1.910) Loss 0.1530 (0.1530) Prec@1 94.922 (94.922)
Epoch: [124][10/12] Time 0.161 (0.335) Loss 0.1130 (0.1614) Prec@1 96.094 (94.744)
Test: [0/2] Time 1.703 (1.703) Loss 0.6778 (0.6778) Prec@1 91.016 (91.016)
* epoch: 124 Prec@1 91.592
* epoch: 124 Prec@1 91.592
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [125][0/12] Time 1.927 (1.927) Loss 0.1401 (0.1401) Prec@1 95.312 (95.312)
Epoch: [125][10/12] Time 0.159 (0.334) Loss 0.1760 (0.1236) Prec@1 94.141 (95.774)
Test: [0/2] Time 1.697 (1.697) Loss 0.8132 (0.8132) Prec@1 70.703 (70.703)
* epoch: 125 Prec@1 72.072
* epoch: 125 Prec@1 72.072
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [126][0/12] Time 2.032 (2.032) Loss 0.1410 (0.1410) Prec@1 94.141 (94.141)
Epoch: [126][10/12] Time 0.159 (0.330) Loss 0.1733 (0.1614) Prec@1 96.094 (93.928)
Test: [0/2] Time 1.692 (1.692) Loss 0.4986 (0.4986) Prec@1 86.328 (86.328)
* epoch: 126 Prec@1 87.688
* epoch: 126 Prec@1 87.688
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [127][0/12] Time 2.220 (2.220) Loss 0.1754 (0.1754) Prec@1 93.750 (93.750)
Epoch: [127][10/12] Time 0.159 (0.346) Loss 0.1546 (0.1384) Prec@1 93.750 (94.922)
Test: [0/2] Time 1.701 (1.701) Loss 0.9395 (0.9395) Prec@1 85.156 (85.156)
* epoch: 127 Prec@1 86.787
* epoch: 127 Prec@1 86.787
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [128][0/12] Time 1.902 (1.902) Loss 0.1456 (0.1456) Prec@1 95.703 (95.703)
Epoch: [128][10/12] Time 0.160 (0.322) Loss 0.1806 (0.1682) Prec@1 93.750 (94.496)
Test: [0/2] Time 1.711 (1.711) Loss 0.5188 (0.5188) Prec@1 90.625 (90.625)
* epoch: 128 Prec@1 90.691
* epoch: 128 Prec@1 90.691
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [129][0/12] Time 1.951 (1.951) Loss 0.0664 (0.0664) Prec@1 98.047 (98.047)
Epoch: [129][10/12] Time 0.160 (0.323) Loss 0.1485 (0.1174) Prec@1 94.922 (96.058)
Test: [0/2] Time 1.704 (1.704) Loss 0.6762 (0.6762) Prec@1 89.062 (89.062)
* epoch: 129 Prec@1 90.090
* epoch: 129 Prec@1 90.090
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [130][0/12] Time 1.943 (1.943) Loss 0.2280 (0.2280) Prec@1 92.578 (92.578)
Epoch: [130][10/12] Time 0.160 (0.332) Loss 0.2291 (0.1751) Prec@1 94.531 (93.786)
Test: [0/2] Time 1.717 (1.717) Loss 0.4670 (0.4670) Prec@1 91.406 (91.406)
* epoch: 130 Prec@1 91.892
* epoch: 130 Prec@1 91.892
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [131][0/12] Time 2.158 (2.158) Loss 0.1494 (0.1494) Prec@1 94.141 (94.141)
Epoch: [131][10/12] Time 0.159 (0.341) Loss 0.1707 (0.1408) Prec@1 93.750 (94.744)
Test: [0/2] Time 1.699 (1.699) Loss 0.4758 (0.4758) Prec@1 87.891 (87.891)
* epoch: 131 Prec@1 87.387
* epoch: 131 Prec@1 87.387
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [132][0/12] Time 1.991 (1.991) Loss 0.1658 (0.1658) Prec@1 92.578 (92.578)
Epoch: [132][10/12] Time 0.159 (0.326) Loss 0.1663 (0.1657) Prec@1 94.922 (94.212)
Test: [0/2] Time 1.708 (1.708) Loss 0.5929 (0.5929) Prec@1 91.016 (91.016)
* epoch: 132 Prec@1 91.892
* epoch: 132 Prec@1 91.892
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [133][0/12] Time 1.973 (1.973) Loss 0.0819 (0.0819) Prec@1 97.656 (97.656)
Epoch: [133][10/12] Time 0.159 (0.324) Loss 0.1221 (0.1269) Prec@1 94.922 (95.135)
Test: [0/2] Time 1.696 (1.696) Loss 0.4731 (0.4731) Prec@1 92.578 (92.578)
* epoch: 133 Prec@1 92.793
* epoch: 133 Prec@1 92.793
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [134][0/12] Time 1.971 (1.971) Loss 0.0957 (0.0957) Prec@1 98.047 (98.047)
Epoch: [134][10/12] Time 0.160 (0.324) Loss 0.1335 (0.1506) Prec@1 93.359 (94.567)
Test: [0/2] Time 1.715 (1.715) Loss 0.5484 (0.5484) Prec@1 91.797 (91.797)
* epoch: 134 Prec@1 92.192
* epoch: 134 Prec@1 92.192
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [135][0/12] Time 2.178 (2.178) Loss 0.1302 (0.1302) Prec@1 93.750 (93.750)
Epoch: [135][10/12] Time 0.161 (0.342) Loss 0.1452 (0.1391) Prec@1 94.922 (94.922)
Test: [0/2] Time 1.709 (1.709) Loss 0.7818 (0.7818) Prec@1 89.453 (89.453)
* epoch: 135 Prec@1 89.489
* epoch: 135 Prec@1 89.489
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [136][0/12] Time 1.960 (1.960) Loss 0.1286 (0.1286) Prec@1 96.094 (96.094)
Epoch: [136][10/12] Time 0.160 (0.323) Loss 0.0955 (0.1380) Prec@1 97.656 (94.709)
Test: [0/2] Time 1.717 (1.717) Loss 0.5795 (0.5795) Prec@1 89.453 (89.453)
* epoch: 136 Prec@1 90.090
* epoch: 136 Prec@1 90.090
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [137][0/12] Time 1.968 (1.968) Loss 0.1092 (0.1092) Prec@1 94.531 (94.531)
Epoch: [137][10/12] Time 0.160 (0.324) Loss 0.2211 (0.1232) Prec@1 93.359 (95.526)
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
* Prec@1 98.193
* Prec@1 98.193
Best accuracy: 98.19277163585984
[validate_2020-03-26-17-09-34] done
[validate_2020-03-26-17-09-34] done
start test using path : ../data/Fourth_data/demo
Test start
loading checkpoint...
checkpoint already loaded!
start test
data path directory is ../data/Fourth_data/demo
finish test
set Type
start test using path : ../data/Fourth_data/demo
Test start
loading checkpoint...
checkpoint already loaded!
start test
data path directory is ../data/Fourth_data/demo
finish test
train start
load yml file
2020-03-26-17-10-28
use seed 825
use dataset : ../data/Fourth_data/All
{'task': 'All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6', 'modelname': 'MobilenetV3', 'output': 'output', 'checkpoint': 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar', 'gpu': [2], 'data': {'train': '../data/Fourth_data/All', 'val': '../data/Fourth_data/All', 'test': '../data/Fourth_data/All'}, 'train': {'epochs': 3000, 'start-epoch': 0, 'batch-size': 256, 'worker': 16, 'weight': [2.0, 4.0, 1.0, 1.0, 3.0, 1.0, 1.0], 'resume': '', 'augment': True, 'size': 224, 'confidence': False}, 'predict': {'batch-size': 256, 'worker': 64, 'cam-class': 'Crack', 'cam': False, 'normalize': True}, 'optimizer': {'lr': 0.1, 'momentum': 0.9, 'weight_decay': 0.0001}, 'loss': {'gamma': 2.0, 'alpha': 0.8}, 'model': {'blocks': 6, 'class': 7}, 'etc': {'tensorboard': False, 'print_freq': 10}, 'id': 'train_2020-03-26-17-10-28'}
using normalize
using no dropout
using SGD
Number of model parameters: 461559
Epoch: [0][0/12] Time 3.022 (3.022) Loss 1.9330 (1.9330) Prec@1 16.016 (16.016)
Epoch: [0][10/12] Time 0.159 (0.411) Loss 1.6084 (1.7482) Prec@1 23.828 (17.898)
Test: [0/2] Time 1.651 (1.651) Loss 1.8596 (1.8596) Prec@1 10.938 (10.938)
* epoch: 0 Prec@1 12.012
* epoch: 0 Prec@1 12.012
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [1][0/12] Time 1.962 (1.962) Loss 1.5773 (1.5773) Prec@1 28.516 (28.516)
Epoch: [1][10/12] Time 0.158 (0.323) Loss 1.6759 (1.5514) Prec@1 20.312 (26.136)
Test: [0/2] Time 1.653 (1.653) Loss 1.8462 (1.8462) Prec@1 31.250 (31.250)
* epoch: 1 Prec@1 30.030
* epoch: 1 Prec@1 30.030
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [2][0/12] Time 1.952 (1.952) Loss 1.5458 (1.5458) Prec@1 26.562 (26.562)
Epoch: [2][10/12] Time 0.157 (0.331) Loss 1.2252 (1.3803) Prec@1 42.969 (31.889)
Test: [0/2] Time 1.674 (1.674) Loss 1.7407 (1.7407) Prec@1 30.859 (30.859)
* epoch: 2 Prec@1 32.733
* epoch: 2 Prec@1 32.733
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [3][0/12] Time 1.960 (1.960) Loss 1.2237 (1.2237) Prec@1 39.062 (39.062)
Epoch: [3][10/12] Time 0.155 (0.323) Loss 1.1566 (1.2237) Prec@1 41.797 (41.193)
Test: [0/2] Time 1.687 (1.687) Loss 1.7368 (1.7368) Prec@1 22.266 (22.266)
* epoch: 3 Prec@1 22.823
* epoch: 3 Prec@1 22.823
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [4][0/12] Time 1.967 (1.967) Loss 1.3065 (1.3065) Prec@1 48.828 (48.828)
Epoch: [4][10/12] Time 0.162 (0.335) Loss 1.0501 (1.1183) Prec@1 58.594 (53.906)
Test: [0/2] Time 1.665 (1.665) Loss 1.1960 (1.1960) Prec@1 53.516 (53.516)
* epoch: 4 Prec@1 52.553
* epoch: 4 Prec@1 52.553
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [5][0/12] Time 1.930 (1.930) Loss 1.0283 (1.0283) Prec@1 61.719 (61.719)
Epoch: [5][10/12] Time 0.159 (0.320) Loss 1.0155 (1.0390) Prec@1 58.594 (60.227)
Test: [0/2] Time 1.692 (1.692) Loss 1.2309 (1.2309) Prec@1 60.938 (60.938)
* epoch: 5 Prec@1 60.360
* epoch: 5 Prec@1 60.360
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [6][0/12] Time 2.214 (2.214) Loss 0.9515 (0.9515) Prec@1 64.844 (64.844)
Epoch: [6][10/12] Time 0.158 (0.346) Loss 0.7833 (0.9050) Prec@1 75.781 (68.999)
Test: [0/2] Time 1.663 (1.663) Loss 0.9625 (0.9625) Prec@1 49.609 (49.609)
* epoch: 6 Prec@1 49.850
* epoch: 6 Prec@1 49.850
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [7][0/12] Time 1.919 (1.919) Loss 0.7668 (0.7668) Prec@1 76.172 (76.172)
Epoch: [7][10/12] Time 0.158 (0.319) Loss 0.9260 (0.8527) Prec@1 63.672 (71.768)
Test: [0/2] Time 1.675 (1.675) Loss 1.1891 (1.1891) Prec@1 61.328 (61.328)
* epoch: 7 Prec@1 58.859
* epoch: 7 Prec@1 58.859
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [8][0/12] Time 2.244 (2.244) Loss 0.8546 (0.8546) Prec@1 72.266 (72.266)
Epoch: [8][10/12] Time 0.159 (0.348) Loss 0.7712 (0.8669) Prec@1 76.562 (71.094)
Test: [0/2] Time 1.707 (1.707) Loss 0.9092 (0.9092) Prec@1 63.281 (63.281)
* epoch: 8 Prec@1 62.462
* epoch: 8 Prec@1 62.462
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [9][0/12] Time 1.925 (1.925) Loss 0.7878 (0.7878) Prec@1 78.516 (78.516)
Epoch: [9][10/12] Time 0.160 (0.325) Loss 0.7174 (0.7807) Prec@1 69.922 (73.509)
Test: [0/2] Time 1.707 (1.707) Loss 0.9544 (0.9544) Prec@1 62.891 (62.891)
* epoch: 9 Prec@1 60.661
* epoch: 9 Prec@1 60.661
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [10][0/12] Time 2.220 (2.220) Loss 0.9374 (0.9374) Prec@1 67.188 (67.188)
Epoch: [10][10/12] Time 0.159 (0.347) Loss 0.7273 (0.7483) Prec@1 80.078 (75.497)
Test: [0/2] Time 1.696 (1.696) Loss 0.9895 (0.9895) Prec@1 47.656 (47.656)
* epoch: 10 Prec@1 48.649
* epoch: 10 Prec@1 48.649
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [11][0/12] Time 1.974 (1.974) Loss 0.6920 (0.6920) Prec@1 75.391 (75.391)
Epoch: [11][10/12] Time 0.154 (0.324) Loss 0.7892 (0.6962) Prec@1 71.484 (75.781)
Test: [0/2] Time 1.665 (1.665) Loss 1.1736 (1.1736) Prec@1 78.125 (78.125)
* epoch: 11 Prec@1 77.778
* epoch: 11 Prec@1 77.778
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [12][0/12] Time 1.927 (1.927) Loss 0.7375 (0.7375) Prec@1 78.906 (78.906)
Epoch: [12][10/12] Time 0.159 (0.328) Loss 0.6453 (0.6972) Prec@1 79.688 (77.308)
Test: [0/2] Time 1.703 (1.703) Loss 0.7701 (0.7701) Prec@1 79.688 (79.688)
* epoch: 12 Prec@1 79.279
* epoch: 12 Prec@1 79.279
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [13][0/12] Time 1.983 (1.983) Loss 0.6737 (0.6737) Prec@1 79.688 (79.688)
Epoch: [13][10/12] Time 0.158 (0.325) Loss 0.6125 (0.6647) Prec@1 79.688 (78.942)
Test: [0/2] Time 1.673 (1.673) Loss 1.7411 (1.7411) Prec@1 44.922 (44.922)
* epoch: 13 Prec@1 44.745
* epoch: 13 Prec@1 44.745
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [14][0/12] Time 1.969 (1.969) Loss 0.7033 (0.7033) Prec@1 76.953 (76.953)
Epoch: [14][10/12] Time 0.160 (0.328) Loss 0.7069 (0.6706) Prec@1 78.516 (78.196)
Test: [0/2] Time 1.689 (1.689) Loss 0.7330 (0.7330) Prec@1 80.078 (80.078)
* epoch: 14 Prec@1 80.781
* epoch: 14 Prec@1 80.781
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [15][0/12] Time 1.957 (1.957) Loss 0.6477 (0.6477) Prec@1 76.562 (76.562)
Epoch: [15][10/12] Time 0.160 (0.323) Loss 0.6251 (0.6128) Prec@1 81.641 (80.007)
Test: [0/2] Time 1.675 (1.675) Loss 0.7821 (0.7821) Prec@1 76.562 (76.562)
* epoch: 15 Prec@1 76.877
* epoch: 15 Prec@1 76.877
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [16][0/12] Time 2.133 (2.133) Loss 0.6314 (0.6314) Prec@1 77.734 (77.734)
Epoch: [16][10/12] Time 0.159 (0.338) Loss 0.6333 (0.6552) Prec@1 80.469 (79.190)
Test: [0/2] Time 1.686 (1.686) Loss 0.8334 (0.8334) Prec@1 81.641 (81.641)
* epoch: 16 Prec@1 81.982
* epoch: 16 Prec@1 81.982
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [17][0/12] Time 1.903 (1.903) Loss 0.5610 (0.5610) Prec@1 79.297 (79.297)
Epoch: [17][10/12] Time 0.158 (0.324) Loss 0.5548 (0.5905) Prec@1 77.734 (81.001)
Test: [0/2] Time 1.713 (1.713) Loss 0.7781 (0.7781) Prec@1 82.422 (82.422)
* epoch: 17 Prec@1 81.381
* epoch: 17 Prec@1 81.381
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [18][0/12] Time 1.914 (1.914) Loss 0.5484 (0.5484) Prec@1 77.344 (77.344)
Epoch: [18][10/12] Time 0.159 (0.320) Loss 0.6511 (0.6283) Prec@1 79.688 (79.545)
Test: [0/2] Time 1.666 (1.666) Loss 0.8708 (0.8708) Prec@1 62.500 (62.500)
* epoch: 18 Prec@1 63.664
* epoch: 18 Prec@1 63.664
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [19][0/12] Time 1.918 (1.918) Loss 0.6416 (0.6416) Prec@1 79.297 (79.297)
Epoch: [19][10/12] Time 0.158 (0.320) Loss 0.5400 (0.5515) Prec@1 83.203 (83.097)
Test: [0/2] Time 1.694 (1.694) Loss 0.6527 (0.6527) Prec@1 81.250 (81.250)
* epoch: 19 Prec@1 81.081
* epoch: 19 Prec@1 81.081
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [20][0/12] Time 2.206 (2.206) Loss 0.5358 (0.5358) Prec@1 81.641 (81.641)
Epoch: [20][10/12] Time 0.160 (0.345) Loss 0.6041 (0.5729) Prec@1 77.734 (81.428)
Test: [0/2] Time 1.720 (1.720) Loss 0.5669 (0.5669) Prec@1 83.594 (83.594)
* epoch: 20 Prec@1 81.682
* epoch: 20 Prec@1 81.682
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [21][0/12] Time 2.203 (2.203) Loss 0.4921 (0.4921) Prec@1 82.422 (82.422)
Epoch: [21][10/12] Time 0.158 (0.345) Loss 0.4483 (0.5416) Prec@1 84.375 (82.919)
Test: [0/2] Time 1.706 (1.706) Loss 0.7614 (0.7614) Prec@1 78.516 (78.516)
* epoch: 21 Prec@1 78.679
* epoch: 21 Prec@1 78.679
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [22][0/12] Time 1.921 (1.921) Loss 0.6461 (0.6461) Prec@1 81.250 (81.250)
Epoch: [22][10/12] Time 0.158 (0.319) Loss 0.5921 (0.5490) Prec@1 82.031 (82.919)
Test: [0/2] Time 1.709 (1.709) Loss 0.6422 (0.6422) Prec@1 82.812 (82.812)
* epoch: 22 Prec@1 83.784
* epoch: 22 Prec@1 83.784
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [23][0/12] Time 1.939 (1.939) Loss 0.4573 (0.4573) Prec@1 87.500 (87.500)
Epoch: [23][10/12] Time 0.159 (0.331) Loss 0.5843 (0.5141) Prec@1 80.469 (84.624)
Test: [0/2] Time 1.685 (1.685) Loss 0.9102 (0.9102) Prec@1 75.000 (75.000)
* epoch: 23 Prec@1 76.577
* epoch: 23 Prec@1 76.577
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [24][0/12] Time 1.925 (1.925) Loss 0.7865 (0.7865) Prec@1 78.125 (78.125)
Epoch: [24][10/12] Time 0.160 (0.319) Loss 0.5571 (0.6079) Prec@1 85.938 (80.859)
Test: [0/2] Time 1.712 (1.712) Loss 0.5972 (0.5972) Prec@1 82.422 (82.422)
* epoch: 24 Prec@1 82.583
* epoch: 24 Prec@1 82.583
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [25][0/12] Time 1.958 (1.958) Loss 0.5632 (0.5632) Prec@1 83.984 (83.984)
Epoch: [25][10/12] Time 0.159 (0.322) Loss 0.3754 (0.5158) Prec@1 88.281 (83.700)
Test: [0/2] Time 1.701 (1.701) Loss 0.8734 (0.8734) Prec@1 77.734 (77.734)
* epoch: 25 Prec@1 78.378
* epoch: 25 Prec@1 78.378
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [26][0/12] Time 2.162 (2.162) Loss 0.6764 (0.6764) Prec@1 81.641 (81.641)
Epoch: [26][10/12] Time 0.160 (0.342) Loss 0.5274 (0.5528) Prec@1 80.469 (82.209)
Test: [0/2] Time 1.696 (1.696) Loss 0.6240 (0.6240) Prec@1 85.938 (85.938)
* epoch: 26 Prec@1 84.084
* epoch: 26 Prec@1 84.084
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [27][0/12] Time 2.181 (2.181) Loss 0.5241 (0.5241) Prec@1 83.203 (83.203)
Epoch: [27][10/12] Time 0.158 (0.343) Loss 0.6147 (0.5227) Prec@1 79.297 (83.736)
Test: [0/2] Time 1.708 (1.708) Loss 0.7268 (0.7268) Prec@1 80.469 (80.469)
* epoch: 27 Prec@1 79.279
* epoch: 27 Prec@1 79.279
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [28][0/12] Time 1.966 (1.966) Loss 0.4439 (0.4439) Prec@1 87.500 (87.500)
Epoch: [28][10/12] Time 0.159 (0.323) Loss 0.5766 (0.5243) Prec@1 85.547 (82.955)
Test: [0/2] Time 1.684 (1.684) Loss 0.6416 (0.6416) Prec@1 82.031 (82.031)
* epoch: 28 Prec@1 82.583
* epoch: 28 Prec@1 82.583
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [29][0/12] Time 1.943 (1.943) Loss 0.5633 (0.5633) Prec@1 79.297 (79.297)
Epoch: [29][10/12] Time 0.159 (0.321) Loss 0.4740 (0.4708) Prec@1 85.547 (84.730)
Test: [0/2] Time 1.704 (1.704) Loss 0.6950 (0.6950) Prec@1 78.125 (78.125)
* epoch: 29 Prec@1 78.378
* epoch: 29 Prec@1 78.378
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [30][0/12] Time 1.953 (1.953) Loss 0.6137 (0.6137) Prec@1 82.422 (82.422)
Epoch: [30][10/12] Time 0.159 (0.322) Loss 0.3596 (0.5028) Prec@1 87.891 (84.553)
Test: [0/2] Time 1.699 (1.699) Loss 0.6546 (0.6546) Prec@1 82.031 (82.031)
* epoch: 30 Prec@1 83.483
* epoch: 30 Prec@1 83.483
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [31][0/12] Time 2.174 (2.174) Loss 0.4846 (0.4846) Prec@1 83.984 (83.984)
Epoch: [31][10/12] Time 0.160 (0.343) Loss 0.4474 (0.4548) Prec@1 84.375 (85.050)
Test: [0/2] Time 1.678 (1.678) Loss 0.6281 (0.6281) Prec@1 84.375 (84.375)
* epoch: 31 Prec@1 84.084
* epoch: 31 Prec@1 84.084
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [32][0/12] Time 1.929 (1.929) Loss 0.4780 (0.4780) Prec@1 83.594 (83.594)
Epoch: [32][10/12] Time 0.160 (0.320) Loss 0.4670 (0.5008) Prec@1 85.938 (83.842)
Test: [0/2] Time 1.698 (1.698) Loss 0.5501 (0.5501) Prec@1 82.422 (82.422)
* epoch: 32 Prec@1 82.583
* epoch: 32 Prec@1 82.583
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [33][0/12] Time 2.224 (2.224) Loss 0.4372 (0.4372) Prec@1 89.062 (89.062)
Epoch: [33][10/12] Time 0.159 (0.347) Loss 0.4002 (0.4672) Prec@1 87.500 (85.369)
Test: [0/2] Time 1.650 (1.650) Loss 0.9937 (0.9937) Prec@1 65.234 (65.234)
* epoch: 33 Prec@1 65.165
* epoch: 33 Prec@1 65.165
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [34][0/12] Time 2.233 (2.233) Loss 0.5080 (0.5080) Prec@1 82.031 (82.031)
Epoch: [34][10/12] Time 0.159 (0.347) Loss 0.4675 (0.4632) Prec@1 84.766 (84.908)
Test: [0/2] Time 1.673 (1.673) Loss 0.6285 (0.6285) Prec@1 83.984 (83.984)
* epoch: 34 Prec@1 84.084
* epoch: 34 Prec@1 84.084
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [35][0/12] Time 1.915 (1.915) Loss 0.4910 (0.4910) Prec@1 86.328 (86.328)
Epoch: [35][10/12] Time 0.159 (0.320) Loss 0.3640 (0.4289) Prec@1 88.281 (86.222)
Test: [0/2] Time 1.725 (1.725) Loss 0.9229 (0.9229) Prec@1 80.859 (80.859)
* epoch: 35 Prec@1 80.180
* epoch: 35 Prec@1 80.180
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [36][0/12] Time 1.938 (1.938) Loss 0.5173 (0.5173) Prec@1 81.641 (81.641)
Epoch: [36][10/12] Time 0.159 (0.321) Loss 0.5336 (0.4753) Prec@1 83.203 (84.553)
Test: [0/2] Time 1.725 (1.725) Loss 0.5637 (0.5637) Prec@1 87.109 (87.109)
* epoch: 36 Prec@1 86.186
* epoch: 36 Prec@1 86.186
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [37][0/12] Time 2.224 (2.224) Loss 0.3901 (0.3901) Prec@1 86.719 (86.719)
Epoch: [37][10/12] Time 0.159 (0.347) Loss 0.4682 (0.4061) Prec@1 82.422 (86.648)
Test: [0/2] Time 1.686 (1.686) Loss 0.9415 (0.9415) Prec@1 74.219 (74.219)
* epoch: 37 Prec@1 74.474
* epoch: 37 Prec@1 74.474
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [38][0/12] Time 1.960 (1.960) Loss 0.4797 (0.4797) Prec@1 84.375 (84.375)
Epoch: [38][10/12] Time 0.159 (0.324) Loss 0.4020 (0.4410) Prec@1 88.672 (85.653)
Test: [0/2] Time 1.692 (1.692) Loss 0.4875 (0.4875) Prec@1 84.375 (84.375)
* epoch: 38 Prec@1 83.483
* epoch: 38 Prec@1 83.483
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [39][0/12] Time 1.931 (1.931) Loss 0.4579 (0.4579) Prec@1 85.156 (85.156)
Epoch: [39][10/12] Time 0.159 (0.330) Loss 0.3570 (0.3953) Prec@1 87.500 (86.754)
Test: [0/2] Time 1.673 (1.673) Loss 0.6890 (0.6890) Prec@1 84.766 (84.766)
* epoch: 39 Prec@1 83.784
* epoch: 39 Prec@1 83.784
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [40][0/12] Time 1.908 (1.908) Loss 0.5327 (0.5327) Prec@1 82.031 (82.031)
Epoch: [40][10/12] Time 0.160 (0.320) Loss 0.3981 (0.4445) Prec@1 88.281 (85.724)
Test: [0/2] Time 1.706 (1.706) Loss 0.4213 (0.4213) Prec@1 88.672 (88.672)
* epoch: 40 Prec@1 87.387
* epoch: 40 Prec@1 87.387
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [41][0/12] Time 1.924 (1.924) Loss 0.4044 (0.4044) Prec@1 85.938 (85.938)
Epoch: [41][10/12] Time 0.160 (0.318) Loss 0.4958 (0.3959) Prec@1 86.719 (86.861)
Test: [0/2] Time 1.699 (1.699) Loss 0.7511 (0.7511) Prec@1 76.172 (76.172)
* epoch: 41 Prec@1 74.174
* epoch: 41 Prec@1 74.174
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [42][0/12] Time 2.204 (2.204) Loss 0.4268 (0.4268) Prec@1 88.281 (88.281)
Epoch: [42][10/12] Time 0.158 (0.345) Loss 0.4259 (0.4605) Prec@1 87.109 (84.979)
Test: [0/2] Time 1.730 (1.730) Loss 0.6196 (0.6196) Prec@1 87.109 (87.109)
* epoch: 42 Prec@1 87.387
* epoch: 42 Prec@1 87.387
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [43][0/12] Time 2.063 (2.063) Loss 0.4617 (0.4617) Prec@1 84.375 (84.375)
Epoch: [43][10/12] Time 0.159 (0.332) Loss 0.4178 (0.3862) Prec@1 87.891 (87.287)
Test: [0/2] Time 1.692 (1.692) Loss 0.7116 (0.7116) Prec@1 78.516 (78.516)
* epoch: 43 Prec@1 78.378
* epoch: 43 Prec@1 78.378
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [44][0/12] Time 1.968 (1.968) Loss 0.6055 (0.6055) Prec@1 76.953 (76.953)
Epoch: [44][10/12] Time 0.160 (0.331) Loss 0.4583 (0.4649) Prec@1 85.156 (83.487)
Test: [0/2] Time 1.690 (1.690) Loss 0.4601 (0.4601) Prec@1 87.109 (87.109)
* epoch: 44 Prec@1 86.787
* epoch: 44 Prec@1 86.787
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [45][0/12] Time 1.908 (1.908) Loss 0.4238 (0.4238) Prec@1 87.109 (87.109)
Epoch: [45][10/12] Time 0.159 (0.318) Loss 0.5030 (0.3826) Prec@1 82.812 (87.074)
Test: [0/2] Time 1.707 (1.707) Loss 0.7285 (0.7285) Prec@1 79.688 (79.688)
* epoch: 45 Prec@1 80.781
* epoch: 45 Prec@1 80.781
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [46][0/12] Time 1.955 (1.955) Loss 0.5765 (0.5765) Prec@1 81.250 (81.250)
Epoch: [46][10/12] Time 0.159 (0.321) Loss 0.3067 (0.4130) Prec@1 88.672 (86.009)
Test: [0/2] Time 1.686 (1.686) Loss 0.4962 (0.4962) Prec@1 89.062 (89.062)
* epoch: 46 Prec@1 88.288
* epoch: 46 Prec@1 88.288
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [47][0/12] Time 1.986 (1.986) Loss 0.3760 (0.3760) Prec@1 87.891 (87.891)
Epoch: [47][10/12] Time 0.159 (0.335) Loss 0.3775 (0.3699) Prec@1 84.766 (87.216)
Test: [0/2] Time 1.697 (1.697) Loss 0.5537 (0.5537) Prec@1 85.938 (85.938)
* epoch: 47 Prec@1 85.886
* epoch: 47 Prec@1 85.886
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [48][0/12] Time 1.951 (1.951) Loss 0.3737 (0.3737) Prec@1 87.500 (87.500)
Epoch: [48][10/12] Time 0.159 (0.322) Loss 0.3257 (0.3962) Prec@1 88.672 (87.145)
Test: [0/2] Time 1.693 (1.693) Loss 0.5342 (0.5342) Prec@1 87.109 (87.109)
* epoch: 48 Prec@1 85.586
* epoch: 48 Prec@1 85.586
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [49][0/12] Time 1.991 (1.991) Loss 0.3158 (0.3158) Prec@1 88.672 (88.672)
Epoch: [49][10/12] Time 0.159 (0.326) Loss 0.4068 (0.3745) Prec@1 87.109 (87.571)
Test: [0/2] Time 1.684 (1.684) Loss 0.5855 (0.5855) Prec@1 81.641 (81.641)
* epoch: 49 Prec@1 80.781
* epoch: 49 Prec@1 80.781
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [50][0/12] Time 1.904 (1.904) Loss 0.6005 (0.6005) Prec@1 79.297 (79.297)
Epoch: [50][10/12] Time 0.159 (0.319) Loss 0.3856 (0.3787) Prec@1 86.328 (87.464)
Test: [0/2] Time 1.680 (1.680) Loss 0.6797 (0.6797) Prec@1 77.344 (77.344)
* epoch: 50 Prec@1 78.979
* epoch: 50 Prec@1 78.979
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [51][0/12] Time 2.151 (2.151) Loss 0.3381 (0.3381) Prec@1 89.844 (89.844)
Epoch: [51][10/12] Time 0.157 (0.340) Loss 0.3905 (0.3525) Prec@1 83.594 (88.352)
Test: [0/2] Time 1.705 (1.705) Loss 0.5561 (0.5561) Prec@1 86.328 (86.328)
* epoch: 51 Prec@1 85.886
* epoch: 51 Prec@1 85.886
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [52][0/12] Time 1.977 (1.977) Loss 0.3419 (0.3419) Prec@1 88.281 (88.281)
Epoch: [52][10/12] Time 0.159 (0.325) Loss 0.3898 (0.3460) Prec@1 88.281 (88.565)
Test: [0/2] Time 1.704 (1.704) Loss 0.4699 (0.4699) Prec@1 86.328 (86.328)
* epoch: 52 Prec@1 85.285
* epoch: 52 Prec@1 85.285
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [53][0/12] Time 1.954 (1.954) Loss 0.2477 (0.2477) Prec@1 88.672 (88.672)
Epoch: [53][10/12] Time 0.159 (0.330) Loss 0.3368 (0.3348) Prec@1 88.672 (88.920)
Test: [0/2] Time 1.672 (1.672) Loss 0.9721 (0.9721) Prec@1 60.547 (60.547)
* epoch: 53 Prec@1 61.261
* epoch: 53 Prec@1 61.261
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [54][0/12] Time 2.249 (2.249) Loss 0.4255 (0.4255) Prec@1 82.031 (82.031)
Epoch: [54][10/12] Time 0.160 (0.349) Loss 0.3790 (0.3706) Prec@1 85.547 (86.506)
Test: [0/2] Time 1.664 (1.664) Loss 0.4914 (0.4914) Prec@1 88.281 (88.281)
* epoch: 54 Prec@1 88.889
* epoch: 54 Prec@1 88.889
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [55][0/12] Time 1.919 (1.919) Loss 0.3694 (0.3694) Prec@1 85.547 (85.547)
Epoch: [55][10/12] Time 0.159 (0.319) Loss 0.3774 (0.3204) Prec@1 88.672 (89.240)
Test: [0/2] Time 1.689 (1.689) Loss 0.7353 (0.7353) Prec@1 80.078 (80.078)
* epoch: 55 Prec@1 79.880
* epoch: 55 Prec@1 79.880
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [56][0/12] Time 1.947 (1.947) Loss 0.4603 (0.4603) Prec@1 83.594 (83.594)
Epoch: [56][10/12] Time 0.159 (0.322) Loss 0.3666 (0.3867) Prec@1 87.891 (86.932)
Test: [0/2] Time 1.722 (1.722) Loss 0.4614 (0.4614) Prec@1 86.719 (86.719)
* epoch: 56 Prec@1 86.486
* epoch: 56 Prec@1 86.486
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [57][0/12] Time 1.980 (1.980) Loss 0.3089 (0.3089) Prec@1 89.844 (89.844)
Epoch: [57][10/12] Time 0.160 (0.323) Loss 0.2758 (0.3167) Prec@1 90.234 (89.027)
Test: [0/2] Time 1.713 (1.713) Loss 0.7926 (0.7926) Prec@1 69.141 (69.141)
* epoch: 57 Prec@1 68.769
* epoch: 57 Prec@1 68.769
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [58][0/12] Time 1.897 (1.897) Loss 0.3536 (0.3536) Prec@1 86.328 (86.328)
Epoch: [58][10/12] Time 0.160 (0.318) Loss 0.3304 (0.3711) Prec@1 91.406 (87.109)
Test: [0/2] Time 1.722 (1.722) Loss 0.4612 (0.4612) Prec@1 89.062 (89.062)
* epoch: 58 Prec@1 89.790
* epoch: 58 Prec@1 89.790
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [59][0/12] Time 2.226 (2.226) Loss 0.2936 (0.2936) Prec@1 91.406 (91.406)
Epoch: [59][10/12] Time 0.159 (0.348) Loss 0.3097 (0.3106) Prec@1 87.500 (89.560)
Test: [0/2] Time 1.691 (1.691) Loss 0.5900 (0.5900) Prec@1 83.984 (83.984)
* epoch: 59 Prec@1 85.285
* epoch: 59 Prec@1 85.285
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [60][0/12] Time 2.012 (2.012) Loss 0.2896 (0.2896) Prec@1 90.234 (90.234)
Epoch: [60][10/12] Time 0.158 (0.326) Loss 0.3392 (0.3331) Prec@1 87.891 (88.246)
Test: [0/2] Time 1.725 (1.725) Loss 0.5262 (0.5262) Prec@1 89.453 (89.453)
* epoch: 60 Prec@1 88.889
* epoch: 60 Prec@1 88.889
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [61][0/12] Time 1.936 (1.936) Loss 0.3321 (0.3321) Prec@1 88.281 (88.281)
Epoch: [61][10/12] Time 0.159 (0.320) Loss 0.3931 (0.3128) Prec@1 87.891 (89.347)
Test: [0/2] Time 1.672 (1.672) Loss 0.8900 (0.8900) Prec@1 57.812 (57.812)
* epoch: 61 Prec@1 58.258
* epoch: 61 Prec@1 58.258
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [62][0/12] Time 1.955 (1.955) Loss 0.3191 (0.3191) Prec@1 89.844 (89.844)
Epoch: [62][10/12] Time 0.154 (0.322) Loss 0.3570 (0.3385) Prec@1 88.281 (88.636)
Test: [0/2] Time 1.709 (1.709) Loss 0.4341 (0.4341) Prec@1 86.719 (86.719)
* epoch: 62 Prec@1 86.486
* epoch: 62 Prec@1 86.486
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [63][0/12] Time 1.930 (1.930) Loss 0.2078 (0.2078) Prec@1 92.188 (92.188)
Epoch: [63][10/12] Time 0.159 (0.320) Loss 0.3926 (0.2808) Prec@1 88.281 (90.376)
Test: [0/2] Time 1.681 (1.681) Loss 0.5782 (0.5782) Prec@1 71.484 (71.484)
* epoch: 63 Prec@1 70.270
* epoch: 63 Prec@1 70.270
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [64][0/12] Time 2.252 (2.252) Loss 0.3744 (0.3744) Prec@1 87.891 (87.891)
Epoch: [64][10/12] Time 0.158 (0.349) Loss 0.3298 (0.3217) Prec@1 88.672 (89.276)
Test: [0/2] Time 1.717 (1.717) Loss 0.4983 (0.4983) Prec@1 89.062 (89.062)
* epoch: 64 Prec@1 89.489
* epoch: 64 Prec@1 89.489
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [65][0/12] Time 1.913 (1.913) Loss 0.3845 (0.3845) Prec@1 87.891 (87.891)
Epoch: [65][10/12] Time 0.161 (0.332) Loss 0.2949 (0.3003) Prec@1 89.062 (89.950)
Test: [0/2] Time 1.693 (1.693) Loss 0.5655 (0.5655) Prec@1 86.328 (86.328)
* epoch: 65 Prec@1 87.087
* epoch: 65 Prec@1 87.087
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [66][0/12] Time 1.949 (1.949) Loss 0.4444 (0.4444) Prec@1 87.109 (87.109)
Epoch: [66][10/12] Time 0.159 (0.322) Loss 0.3399 (0.3130) Prec@1 89.062 (89.098)
Test: [0/2] Time 1.703 (1.703) Loss 0.5085 (0.5085) Prec@1 89.844 (89.844)
* epoch: 66 Prec@1 87.688
* epoch: 66 Prec@1 87.688
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [67][0/12] Time 1.946 (1.946) Loss 0.2375 (0.2375) Prec@1 91.016 (91.016)
Epoch: [67][10/12] Time 0.159 (0.328) Loss 0.3387 (0.2769) Prec@1 85.547 (90.128)
Test: [0/2] Time 1.703 (1.703) Loss 0.5086 (0.5086) Prec@1 88.281 (88.281)
* epoch: 67 Prec@1 87.387
* epoch: 67 Prec@1 87.387
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [68][0/12] Time 1.972 (1.972) Loss 0.3379 (0.3379) Prec@1 90.234 (90.234)
Epoch: [68][10/12] Time 0.159 (0.324) Loss 0.2454 (0.3145) Prec@1 91.406 (88.672)
Test: [0/2] Time 1.719 (1.719) Loss 0.6045 (0.6045) Prec@1 87.500 (87.500)
* epoch: 68 Prec@1 88.589
* epoch: 68 Prec@1 88.589
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [69][0/12] Time 1.947 (1.947) Loss 0.2363 (0.2363) Prec@1 90.625 (90.625)
Epoch: [69][10/12] Time 0.159 (0.321) Loss 0.2486 (0.2593) Prec@1 90.234 (90.874)
Test: [0/2] Time 1.710 (1.710) Loss 0.5298 (0.5298) Prec@1 80.078 (80.078)
* epoch: 69 Prec@1 81.381
* epoch: 69 Prec@1 81.381
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [70][0/12] Time 1.949 (1.949) Loss 0.2916 (0.2916) Prec@1 89.453 (89.453)
Epoch: [70][10/12] Time 0.158 (0.328) Loss 0.2786 (0.2966) Prec@1 89.453 (89.737)
Test: [0/2] Time 1.683 (1.683) Loss 0.4976 (0.4976) Prec@1 87.500 (87.500)
* epoch: 70 Prec@1 88.288
* epoch: 70 Prec@1 88.288
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [71][0/12] Time 1.928 (1.928) Loss 0.1949 (0.1949) Prec@1 94.141 (94.141)
Epoch: [71][10/12] Time 0.159 (0.330) Loss 0.1884 (0.2445) Prec@1 93.359 (90.696)
Test: [0/2] Time 1.684 (1.684) Loss 0.5251 (0.5251) Prec@1 82.812 (82.812)
* epoch: 71 Prec@1 81.081
* epoch: 71 Prec@1 81.081
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [72][0/12] Time 1.930 (1.930) Loss 0.3282 (0.3282) Prec@1 87.500 (87.500)
Epoch: [72][10/12] Time 0.159 (0.320) Loss 0.3842 (0.3169) Prec@1 86.719 (89.666)
Test: [0/2] Time 1.709 (1.709) Loss 0.6996 (0.6996) Prec@1 89.453 (89.453)
* epoch: 72 Prec@1 90.390
* epoch: 72 Prec@1 90.390
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [73][0/12] Time 2.187 (2.187) Loss 0.3168 (0.3168) Prec@1 87.500 (87.500)
Epoch: [73][10/12] Time 0.159 (0.343) Loss 0.3492 (0.2848) Prec@1 85.938 (89.986)
Test: [0/2] Time 1.708 (1.708) Loss 0.9264 (0.9264) Prec@1 85.547 (85.547)
* epoch: 73 Prec@1 86.787
* epoch: 73 Prec@1 86.787
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [74][0/12] Time 1.913 (1.913) Loss 0.3152 (0.3152) Prec@1 88.281 (88.281)
Epoch: [74][10/12] Time 0.159 (0.319) Loss 0.2675 (0.2727) Prec@1 92.578 (90.518)
Test: [0/2] Time 1.720 (1.720) Loss 0.5490 (0.5490) Prec@1 86.328 (86.328)
* epoch: 74 Prec@1 86.186
* epoch: 74 Prec@1 86.186
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [75][0/12] Time 1.922 (1.922) Loss 0.2301 (0.2301) Prec@1 90.625 (90.625)
Epoch: [75][10/12] Time 0.159 (0.320) Loss 0.3265 (0.2525) Prec@1 87.109 (91.229)
Test: [0/2] Time 1.699 (1.699) Loss 0.6857 (0.6857) Prec@1 82.812 (82.812)
* epoch: 75 Prec@1 82.282
* epoch: 75 Prec@1 82.282
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [76][0/12] Time 2.240 (2.240) Loss 0.2848 (0.2848) Prec@1 87.500 (87.500)
Epoch: [76][10/12] Time 0.160 (0.348) Loss 0.3408 (0.3049) Prec@1 87.500 (89.560)
Test: [0/2] Time 1.707 (1.707) Loss 0.5952 (0.5952) Prec@1 81.641 (81.641)
* epoch: 76 Prec@1 81.682
* epoch: 76 Prec@1 81.682
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [77][0/12] Time 1.904 (1.904) Loss 0.2558 (0.2558) Prec@1 92.578 (92.578)
Epoch: [77][10/12] Time 0.159 (0.324) Loss 0.3256 (0.2529) Prec@1 87.500 (91.229)
Test: [0/2] Time 1.708 (1.708) Loss 0.5739 (0.5739) Prec@1 85.156 (85.156)
* epoch: 77 Prec@1 82.883
* epoch: 77 Prec@1 82.883
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [78][0/12] Time 2.032 (2.032) Loss 0.2934 (0.2934) Prec@1 89.844 (89.844)
Epoch: [78][10/12] Time 0.159 (0.330) Loss 0.2887 (0.2670) Prec@1 92.578 (91.300)
Test: [0/2] Time 1.688 (1.688) Loss 0.5590 (0.5590) Prec@1 85.156 (85.156)
* epoch: 78 Prec@1 86.186
* epoch: 78 Prec@1 86.186
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [79][0/12] Time 1.945 (1.945) Loss 0.2431 (0.2431) Prec@1 91.797 (91.797)
Epoch: [79][10/12] Time 0.160 (0.332) Loss 0.3305 (0.2164) Prec@1 87.109 (92.543)
Test: [0/2] Time 1.675 (1.675) Loss 0.7119 (0.7119) Prec@1 77.344 (77.344)
* epoch: 79 Prec@1 76.877
* epoch: 79 Prec@1 76.877
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [80][0/12] Time 2.189 (2.189) Loss 0.2332 (0.2332) Prec@1 92.969 (92.969)
Epoch: [80][10/12] Time 0.159 (0.343) Loss 0.2309 (0.2770) Prec@1 93.359 (91.335)
Test: [0/2] Time 1.680 (1.680) Loss 0.3864 (0.3864) Prec@1 90.234 (90.234)
* epoch: 80 Prec@1 87.688
* epoch: 80 Prec@1 87.688
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [81][0/12] Time 2.239 (2.239) Loss 0.2767 (0.2767) Prec@1 92.578 (92.578)
Epoch: [81][10/12] Time 0.160 (0.348) Loss 0.3311 (0.2484) Prec@1 86.328 (91.335)
Test: [0/2] Time 1.720 (1.720) Loss 0.6348 (0.6348) Prec@1 76.562 (76.562)
* epoch: 81 Prec@1 76.276
* epoch: 81 Prec@1 76.276
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [82][0/12] Time 2.256 (2.256) Loss 0.1899 (0.1899) Prec@1 92.969 (92.969)
Epoch: [82][10/12] Time 0.159 (0.349) Loss 0.2917 (0.2652) Prec@1 90.625 (91.442)
Test: [0/2] Time 1.689 (1.689) Loss 0.5168 (0.5168) Prec@1 85.156 (85.156)
* epoch: 82 Prec@1 85.886
* epoch: 82 Prec@1 85.886
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [83][0/12] Time 2.051 (2.051) Loss 0.1982 (0.1982) Prec@1 92.188 (92.188)
Epoch: [83][10/12] Time 0.159 (0.332) Loss 0.2491 (0.2078) Prec@1 89.844 (92.543)
Test: [0/2] Time 1.682 (1.682) Loss 0.6296 (0.6296) Prec@1 80.078 (80.078)
* epoch: 83 Prec@1 77.177
* epoch: 83 Prec@1 77.177
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [84][0/12] Time 1.912 (1.912) Loss 0.1927 (0.1927) Prec@1 93.359 (93.359)
Epoch: [84][10/12] Time 0.157 (0.318) Loss 0.1888 (0.2599) Prec@1 93.359 (91.406)
Test: [0/2] Time 1.707 (1.707) Loss 0.5512 (0.5512) Prec@1 88.672 (88.672)
* epoch: 84 Prec@1 89.790
* epoch: 84 Prec@1 89.790
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [85][0/12] Time 1.909 (1.909) Loss 0.1752 (0.1752) Prec@1 93.750 (93.750)
Epoch: [85][10/12] Time 0.161 (0.319) Loss 0.2747 (0.2133) Prec@1 91.797 (92.614)
Test: [0/2] Time 1.704 (1.704) Loss 0.5671 (0.5671) Prec@1 86.328 (86.328)
* epoch: 85 Prec@1 87.387
* epoch: 85 Prec@1 87.387
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [86][0/12] Time 2.240 (2.240) Loss 0.2070 (0.2070) Prec@1 89.844 (89.844)
Epoch: [86][10/12] Time 0.160 (0.348) Loss 0.2832 (0.2450) Prec@1 91.797 (91.513)
Test: [0/2] Time 1.709 (1.709) Loss 0.5368 (0.5368) Prec@1 86.719 (86.719)
* epoch: 86 Prec@1 85.586
* epoch: 86 Prec@1 85.586
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [87][0/12] Time 1.953 (1.953) Loss 0.1469 (0.1469) Prec@1 96.094 (96.094)
Epoch: [87][10/12] Time 0.161 (0.329) Loss 0.1976 (0.2014) Prec@1 92.969 (93.466)
Test: [0/2] Time 1.704 (1.704) Loss 0.6268 (0.6268) Prec@1 86.328 (86.328)
* epoch: 87 Prec@1 88.288
* epoch: 87 Prec@1 88.288
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [88][0/12] Time 1.976 (1.976) Loss 0.1892 (0.1892) Prec@1 92.969 (92.969)
Epoch: [88][10/12] Time 0.159 (0.323) Loss 0.1663 (0.2332) Prec@1 93.359 (91.442)
Test: [0/2] Time 1.725 (1.725) Loss 0.4768 (0.4768) Prec@1 89.844 (89.844)
* epoch: 88 Prec@1 89.489
* epoch: 88 Prec@1 89.489
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [89][0/12] Time 2.208 (2.208) Loss 0.1286 (0.1286) Prec@1 94.141 (94.141)
Epoch: [89][10/12] Time 0.159 (0.345) Loss 0.1622 (0.1895) Prec@1 93.750 (92.862)
Test: [0/2] Time 1.670 (1.670) Loss 0.7504 (0.7504) Prec@1 86.719 (86.719)
* epoch: 89 Prec@1 88.288
* epoch: 89 Prec@1 88.288
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [90][0/12] Time 1.919 (1.919) Loss 0.1749 (0.1749) Prec@1 94.531 (94.531)
Epoch: [90][10/12] Time 0.160 (0.319) Loss 0.3030 (0.2244) Prec@1 90.234 (92.472)
Test: [0/2] Time 1.703 (1.703) Loss 0.6520 (0.6520) Prec@1 87.500 (87.500)
* epoch: 90 Prec@1 89.489
* epoch: 90 Prec@1 89.489
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [91][0/12] Time 2.019 (2.019) Loss 0.1967 (0.1967) Prec@1 93.750 (93.750)
Epoch: [91][10/12] Time 0.160 (0.328) Loss 0.2092 (0.1974) Prec@1 91.406 (93.146)
Test: [0/2] Time 1.717 (1.717) Loss 0.5337 (0.5337) Prec@1 89.453 (89.453)
* epoch: 91 Prec@1 89.489
* epoch: 91 Prec@1 89.489
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [92][0/12] Time 1.908 (1.908) Loss 0.1796 (0.1796) Prec@1 93.750 (93.750)
Epoch: [92][10/12] Time 0.160 (0.320) Loss 0.2263 (0.2480) Prec@1 93.359 (91.868)
Test: [0/2] Time 1.715 (1.715) Loss 0.4933 (0.4933) Prec@1 88.672 (88.672)
* epoch: 92 Prec@1 89.189
* epoch: 92 Prec@1 89.189
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [93][0/12] Time 1.934 (1.934) Loss 0.1817 (0.1817) Prec@1 93.750 (93.750)
Epoch: [93][10/12] Time 0.158 (0.320) Loss 0.2488 (0.2133) Prec@1 90.625 (92.330)
Test: [0/2] Time 1.699 (1.699) Loss 0.7086 (0.7086) Prec@1 80.469 (80.469)
* epoch: 93 Prec@1 79.279
* epoch: 93 Prec@1 79.279
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [94][0/12] Time 1.955 (1.955) Loss 0.2422 (0.2422) Prec@1 89.453 (89.453)
Epoch: [94][10/12] Time 0.159 (0.322) Loss 0.1780 (0.2533) Prec@1 93.359 (90.518)
Test: [0/2] Time 1.699 (1.699) Loss 0.6035 (0.6035) Prec@1 89.062 (89.062)
* epoch: 94 Prec@1 89.189
* epoch: 94 Prec@1 89.189
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [95][0/12] Time 1.975 (1.975) Loss 0.2582 (0.2582) Prec@1 90.234 (90.234)
Epoch: [95][10/12] Time 0.159 (0.323) Loss 0.2944 (0.2084) Prec@1 92.188 (92.791)
Test: [0/2] Time 1.730 (1.730) Loss 1.0666 (1.0666) Prec@1 67.188 (67.188)
* epoch: 95 Prec@1 66.967
* epoch: 95 Prec@1 66.967
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [96][0/12] Time 1.966 (1.966) Loss 0.1643 (0.1643) Prec@1 94.141 (94.141)
Epoch: [96][10/12] Time 0.160 (0.331) Loss 0.2444 (0.2336) Prec@1 91.016 (92.294)
Test: [0/2] Time 1.694 (1.694) Loss 0.5861 (0.5861) Prec@1 86.328 (86.328)
* epoch: 96 Prec@1 86.486
* epoch: 96 Prec@1 86.486
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [97][0/12] Time 1.914 (1.914) Loss 0.1659 (0.1659) Prec@1 94.531 (94.531)
Epoch: [97][10/12] Time 0.160 (0.330) Loss 0.2504 (0.1904) Prec@1 92.578 (93.466)
Test: [0/2] Time 1.678 (1.678) Loss 0.6894 (0.6894) Prec@1 78.906 (78.906)
* epoch: 97 Prec@1 80.480
* epoch: 97 Prec@1 80.480
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [98][0/12] Time 1.902 (1.902) Loss 0.2155 (0.2155) Prec@1 91.406 (91.406)
Epoch: [98][10/12] Time 0.159 (0.317) Loss 0.2941 (0.2129) Prec@1 92.578 (92.401)
Test: [0/2] Time 1.687 (1.687) Loss 0.3753 (0.3753) Prec@1 94.922 (94.922)
* epoch: 98 Prec@1 93.093
* epoch: 98 Prec@1 93.093
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
New Best Checkpoint saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar
Epoch: [99][0/12] Time 1.923 (1.923) Loss 0.1940 (0.1940) Prec@1 95.312 (95.312)
Epoch: [99][10/12] Time 0.160 (0.321) Loss 0.1865 (0.1930) Prec@1 93.750 (93.537)
Test: [0/2] Time 1.672 (1.672) Loss 0.5767 (0.5767) Prec@1 83.594 (83.594)
* epoch: 99 Prec@1 83.483
* epoch: 99 Prec@1 83.483
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [100][0/12] Time 1.946 (1.946) Loss 0.3028 (0.3028) Prec@1 91.016 (91.016)
Epoch: [100][10/12] Time 0.159 (0.323) Loss 0.2235 (0.2103) Prec@1 91.406 (92.969)
Test: [0/2] Time 1.704 (1.704) Loss 0.5625 (0.5625) Prec@1 89.844 (89.844)
* epoch: 100 Prec@1 88.889
* epoch: 100 Prec@1 88.889
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [101][0/12] Time 1.920 (1.920) Loss 0.3380 (0.3380) Prec@1 89.844 (89.844)
Epoch: [101][10/12] Time 0.159 (0.320) Loss 0.1733 (0.1909) Prec@1 92.188 (93.679)
Test: [0/2] Time 1.678 (1.678) Loss 0.6445 (0.6445) Prec@1 89.062 (89.062)
* epoch: 101 Prec@1 88.589
* epoch: 101 Prec@1 88.589
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [102][0/12] Time 1.895 (1.895) Loss 0.2093 (0.2093) Prec@1 92.969 (92.969)
Epoch: [102][10/12] Time 0.159 (0.317) Loss 0.2647 (0.2172) Prec@1 89.844 (92.791)
Test: [0/2] Time 1.691 (1.691) Loss 0.4537 (0.4537) Prec@1 89.844 (89.844)
* epoch: 102 Prec@1 90.390
* epoch: 102 Prec@1 90.390
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [103][0/12] Time 1.975 (1.975) Loss 0.1979 (0.1979) Prec@1 92.969 (92.969)
Epoch: [103][10/12] Time 0.159 (0.324) Loss 0.2140 (0.1836) Prec@1 89.844 (93.217)
Test: [0/2] Time 1.706 (1.706) Loss 0.7860 (0.7860) Prec@1 89.062 (89.062)
* epoch: 103 Prec@1 90.090
* epoch: 103 Prec@1 90.090
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [104][0/12] Time 2.022 (2.022) Loss 0.2810 (0.2810) Prec@1 92.578 (92.578)
Epoch: [104][10/12] Time 0.159 (0.330) Loss 0.2480 (0.2219) Prec@1 91.797 (92.081)
Test: [0/2] Time 1.716 (1.716) Loss 0.6215 (0.6215) Prec@1 90.625 (90.625)
* epoch: 104 Prec@1 91.592
* epoch: 104 Prec@1 91.592
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [105][0/12] Time 1.906 (1.906) Loss 0.1642 (0.1642) Prec@1 95.312 (95.312)
Epoch: [105][10/12] Time 0.159 (0.319) Loss 0.1640 (0.1851) Prec@1 93.750 (93.928)
Test: [0/2] Time 1.708 (1.708) Loss 0.5004 (0.5004) Prec@1 90.625 (90.625)
* epoch: 105 Prec@1 90.390
* epoch: 105 Prec@1 90.390
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [106][0/12] Time 1.909 (1.909) Loss 0.1803 (0.1803) Prec@1 90.234 (90.234)
Epoch: [106][10/12] Time 0.158 (0.319) Loss 0.1994 (0.2141) Prec@1 91.797 (92.223)
Test: [0/2] Time 1.688 (1.688) Loss 0.4920 (0.4920) Prec@1 82.031 (82.031)
* epoch: 106 Prec@1 79.580
* epoch: 106 Prec@1 79.580
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [107][0/12] Time 1.971 (1.971) Loss 0.1579 (0.1579) Prec@1 92.969 (92.969)
Epoch: [107][10/12] Time 0.160 (0.330) Loss 0.1896 (0.1695) Prec@1 93.359 (93.786)
Test: [0/2] Time 1.693 (1.693) Loss 0.6627 (0.6627) Prec@1 85.156 (85.156)
* epoch: 107 Prec@1 85.285
* epoch: 107 Prec@1 85.285
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [108][0/12] Time 1.926 (1.926) Loss 0.2721 (0.2721) Prec@1 90.234 (90.234)
Epoch: [108][10/12] Time 0.159 (0.334) Loss 0.1391 (0.1956) Prec@1 94.531 (93.395)
Test: [0/2] Time 1.693 (1.693) Loss 0.6169 (0.6169) Prec@1 87.891 (87.891)
* epoch: 108 Prec@1 88.889
* epoch: 108 Prec@1 88.889
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [109][0/12] Time 1.932 (1.932) Loss 0.1255 (0.1255) Prec@1 96.094 (96.094)
Epoch: [109][10/12] Time 0.159 (0.318) Loss 0.1880 (0.1642) Prec@1 93.750 (94.567)
Test: [0/2] Time 1.702 (1.702) Loss 0.6942 (0.6942) Prec@1 86.719 (86.719)
* epoch: 109 Prec@1 88.288
* epoch: 109 Prec@1 88.288
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [110][0/12] Time 1.941 (1.941) Loss 0.1494 (0.1494) Prec@1 95.312 (95.312)
Epoch: [110][10/12] Time 0.159 (0.320) Loss 0.2451 (0.2071) Prec@1 91.016 (92.330)
Test: [0/2] Time 1.721 (1.721) Loss 0.5960 (0.5960) Prec@1 86.328 (86.328)
* epoch: 110 Prec@1 86.486
* epoch: 110 Prec@1 86.486
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [111][0/12] Time 1.969 (1.969) Loss 0.1588 (0.1588) Prec@1 94.141 (94.141)
Epoch: [111][10/12] Time 0.160 (0.323) Loss 0.1406 (0.1750) Prec@1 96.094 (93.395)
Test: [0/2] Time 1.680 (1.680) Loss 0.6977 (0.6977) Prec@1 90.625 (90.625)
* epoch: 111 Prec@1 90.090
* epoch: 111 Prec@1 90.090
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [112][0/12] Time 1.914 (1.914) Loss 0.2185 (0.2185) Prec@1 92.578 (92.578)
Epoch: [112][10/12] Time 0.161 (0.318) Loss 0.1134 (0.1589) Prec@1 94.922 (94.389)
Test: [0/2] Time 1.692 (1.692) Loss 0.6469 (0.6469) Prec@1 89.062 (89.062)
* epoch: 112 Prec@1 89.489
* epoch: 112 Prec@1 89.489
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [113][0/12] Time 1.978 (1.978) Loss 0.1283 (0.1283) Prec@1 95.703 (95.703)
Epoch: [113][10/12] Time 0.158 (0.323) Loss 0.2125 (0.1576) Prec@1 93.359 (94.815)
Test: [0/2] Time 1.699 (1.699) Loss 0.9452 (0.9452) Prec@1 67.578 (67.578)
* epoch: 113 Prec@1 65.766
* epoch: 113 Prec@1 65.766
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [114][0/12] Time 1.950 (1.950) Loss 0.2040 (0.2040) Prec@1 92.188 (92.188)
Epoch: [114][10/12] Time 0.159 (0.322) Loss 0.2110 (0.1899) Prec@1 94.531 (93.253)
Test: [0/2] Time 1.687 (1.687) Loss 0.5138 (0.5138) Prec@1 86.719 (86.719)
* epoch: 114 Prec@1 86.486
* epoch: 114 Prec@1 86.486
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [115][0/12] Time 1.998 (1.998) Loss 0.1638 (0.1638) Prec@1 95.703 (95.703)
Epoch: [115][10/12] Time 0.160 (0.331) Loss 0.2106 (0.1672) Prec@1 92.578 (94.354)
Test: [0/2] Time 1.709 (1.709) Loss 0.8591 (0.8591) Prec@1 89.062 (89.062)
* epoch: 115 Prec@1 89.790
* epoch: 115 Prec@1 89.790
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [116][0/12] Time 1.976 (1.976) Loss 0.1392 (0.1392) Prec@1 92.969 (92.969)
Epoch: [116][10/12] Time 0.160 (0.336) Loss 0.2718 (0.2105) Prec@1 92.578 (93.253)
Test: [0/2] Time 1.718 (1.718) Loss 0.4857 (0.4857) Prec@1 88.281 (88.281)
* epoch: 116 Prec@1 88.589
* epoch: 116 Prec@1 88.589
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [117][0/12] Time 2.203 (2.203) Loss 0.1616 (0.1616) Prec@1 92.969 (92.969)
Epoch: [117][10/12] Time 0.159 (0.345) Loss 0.1600 (0.1427) Prec@1 94.141 (94.638)
Test: [0/2] Time 1.732 (1.732) Loss 0.8432 (0.8432) Prec@1 89.453 (89.453)
* epoch: 117 Prec@1 90.691
* epoch: 117 Prec@1 90.691
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [118][0/12] Time 1.918 (1.918) Loss 0.1563 (0.1563) Prec@1 94.141 (94.141)
Epoch: [118][10/12] Time 0.160 (0.319) Loss 0.1545 (0.1740) Prec@1 93.750 (93.892)
Test: [0/2] Time 1.722 (1.722) Loss 0.4324 (0.4324) Prec@1 91.797 (91.797)
* epoch: 118 Prec@1 90.991
* epoch: 118 Prec@1 90.991
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [119][0/12] Time 1.912 (1.912) Loss 0.1632 (0.1632) Prec@1 92.578 (92.578)
Epoch: [119][10/12] Time 0.160 (0.317) Loss 0.1550 (0.1470) Prec@1 94.922 (94.638)
Test: [0/2] Time 1.743 (1.743) Loss 0.5448 (0.5448) Prec@1 86.719 (86.719)
* epoch: 119 Prec@1 85.886
* epoch: 119 Prec@1 85.886
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [120][0/12] Time 1.956 (1.956) Loss 0.1617 (0.1617) Prec@1 95.703 (95.703)
Epoch: [120][10/12] Time 0.159 (0.322) Loss 0.1568 (0.1884) Prec@1 94.531 (93.466)
Test: [0/2] Time 1.724 (1.724) Loss 0.4884 (0.4884) Prec@1 89.062 (89.062)
* epoch: 120 Prec@1 89.189
* epoch: 120 Prec@1 89.189
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [121][0/12] Time 2.252 (2.252) Loss 0.0956 (0.0956) Prec@1 96.484 (96.484)
Epoch: [121][10/12] Time 0.160 (0.350) Loss 0.0892 (0.1378) Prec@1 96.094 (94.425)
Test: [0/2] Time 1.702 (1.702) Loss 0.9220 (0.9220) Prec@1 71.875 (71.875)
* epoch: 121 Prec@1 72.372
* epoch: 121 Prec@1 72.372
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [122][0/12] Time 2.193 (2.193) Loss 0.1376 (0.1376) Prec@1 93.359 (93.359)
Epoch: [122][10/12] Time 0.154 (0.344) Loss 0.1217 (0.1669) Prec@1 95.312 (94.034)
Test: [0/2] Time 1.728 (1.728) Loss 0.4749 (0.4749) Prec@1 91.406 (91.406)
* epoch: 122 Prec@1 90.090
* epoch: 122 Prec@1 90.090
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [123][0/12] Time 1.969 (1.969) Loss 0.1731 (0.1731) Prec@1 93.359 (93.359)
Epoch: [123][10/12] Time 0.160 (0.332) Loss 0.1657 (0.1350) Prec@1 95.703 (95.419)
Test: [0/2] Time 1.702 (1.702) Loss 0.4422 (0.4422) Prec@1 92.188 (92.188)
* epoch: 123 Prec@1 90.991
* epoch: 123 Prec@1 90.991
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [124][0/12] Time 1.910 (1.910) Loss 0.1530 (0.1530) Prec@1 94.922 (94.922)
Epoch: [124][10/12] Time 0.161 (0.335) Loss 0.1130 (0.1614) Prec@1 96.094 (94.744)
Test: [0/2] Time 1.703 (1.703) Loss 0.6778 (0.6778) Prec@1 91.016 (91.016)
* epoch: 124 Prec@1 91.592
* epoch: 124 Prec@1 91.592
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [125][0/12] Time 1.927 (1.927) Loss 0.1401 (0.1401) Prec@1 95.312 (95.312)
Epoch: [125][10/12] Time 0.159 (0.334) Loss 0.1760 (0.1236) Prec@1 94.141 (95.774)
Test: [0/2] Time 1.697 (1.697) Loss 0.8132 (0.8132) Prec@1 70.703 (70.703)
* epoch: 125 Prec@1 72.072
* epoch: 125 Prec@1 72.072
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [126][0/12] Time 2.032 (2.032) Loss 0.1410 (0.1410) Prec@1 94.141 (94.141)
Epoch: [126][10/12] Time 0.159 (0.330) Loss 0.1733 (0.1614) Prec@1 96.094 (93.928)
Test: [0/2] Time 1.692 (1.692) Loss 0.4986 (0.4986) Prec@1 86.328 (86.328)
* epoch: 126 Prec@1 87.688
* epoch: 126 Prec@1 87.688
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [127][0/12] Time 2.220 (2.220) Loss 0.1754 (0.1754) Prec@1 93.750 (93.750)
Epoch: [127][10/12] Time 0.159 (0.346) Loss 0.1546 (0.1384) Prec@1 93.750 (94.922)
Test: [0/2] Time 1.701 (1.701) Loss 0.9395 (0.9395) Prec@1 85.156 (85.156)
* epoch: 127 Prec@1 86.787
* epoch: 127 Prec@1 86.787
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [128][0/12] Time 1.902 (1.902) Loss 0.1456 (0.1456) Prec@1 95.703 (95.703)
Epoch: [128][10/12] Time 0.160 (0.322) Loss 0.1806 (0.1682) Prec@1 93.750 (94.496)
Test: [0/2] Time 1.711 (1.711) Loss 0.5188 (0.5188) Prec@1 90.625 (90.625)
* epoch: 128 Prec@1 90.691
* epoch: 128 Prec@1 90.691
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [129][0/12] Time 1.951 (1.951) Loss 0.0664 (0.0664) Prec@1 98.047 (98.047)
Epoch: [129][10/12] Time 0.160 (0.323) Loss 0.1485 (0.1174) Prec@1 94.922 (96.058)
Test: [0/2] Time 1.704 (1.704) Loss 0.6762 (0.6762) Prec@1 89.062 (89.062)
* epoch: 129 Prec@1 90.090
* epoch: 129 Prec@1 90.090
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [130][0/12] Time 1.943 (1.943) Loss 0.2280 (0.2280) Prec@1 92.578 (92.578)
Epoch: [130][10/12] Time 0.160 (0.332) Loss 0.2291 (0.1751) Prec@1 94.531 (93.786)
Test: [0/2] Time 1.717 (1.717) Loss 0.4670 (0.4670) Prec@1 91.406 (91.406)
* epoch: 130 Prec@1 91.892
* epoch: 130 Prec@1 91.892
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [131][0/12] Time 2.158 (2.158) Loss 0.1494 (0.1494) Prec@1 94.141 (94.141)
Epoch: [131][10/12] Time 0.159 (0.341) Loss 0.1707 (0.1408) Prec@1 93.750 (94.744)
Test: [0/2] Time 1.699 (1.699) Loss 0.4758 (0.4758) Prec@1 87.891 (87.891)
* epoch: 131 Prec@1 87.387
* epoch: 131 Prec@1 87.387
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [132][0/12] Time 1.991 (1.991) Loss 0.1658 (0.1658) Prec@1 92.578 (92.578)
Epoch: [132][10/12] Time 0.159 (0.326) Loss 0.1663 (0.1657) Prec@1 94.922 (94.212)
Test: [0/2] Time 1.708 (1.708) Loss 0.5929 (0.5929) Prec@1 91.016 (91.016)
* epoch: 132 Prec@1 91.892
* epoch: 132 Prec@1 91.892
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [133][0/12] Time 1.973 (1.973) Loss 0.0819 (0.0819) Prec@1 97.656 (97.656)
Epoch: [133][10/12] Time 0.159 (0.324) Loss 0.1221 (0.1269) Prec@1 94.922 (95.135)
Test: [0/2] Time 1.696 (1.696) Loss 0.4731 (0.4731) Prec@1 92.578 (92.578)
* epoch: 133 Prec@1 92.793
* epoch: 133 Prec@1 92.793
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [134][0/12] Time 1.971 (1.971) Loss 0.0957 (0.0957) Prec@1 98.047 (98.047)
Epoch: [134][10/12] Time 0.160 (0.324) Loss 0.1335 (0.1506) Prec@1 93.359 (94.567)
Test: [0/2] Time 1.715 (1.715) Loss 0.5484 (0.5484) Prec@1 91.797 (91.797)
* epoch: 134 Prec@1 92.192
* epoch: 134 Prec@1 92.192
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [135][0/12] Time 2.178 (2.178) Loss 0.1302 (0.1302) Prec@1 93.750 (93.750)
Epoch: [135][10/12] Time 0.161 (0.342) Loss 0.1452 (0.1391) Prec@1 94.922 (94.922)
Test: [0/2] Time 1.709 (1.709) Loss 0.7818 (0.7818) Prec@1 89.453 (89.453)
* epoch: 135 Prec@1 89.489
* epoch: 135 Prec@1 89.489
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [136][0/12] Time 1.960 (1.960) Loss 0.1286 (0.1286) Prec@1 96.094 (96.094)
Epoch: [136][10/12] Time 0.160 (0.323) Loss 0.0955 (0.1380) Prec@1 97.656 (94.709)
Test: [0/2] Time 1.717 (1.717) Loss 0.5795 (0.5795) Prec@1 89.453 (89.453)
* epoch: 136 Prec@1 90.090
* epoch: 136 Prec@1 90.090
Checkpoint Saved: output/All/train_2020-03-26-17-10-28_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Epoch: [137][0/12] Time 1.968 (1.968) Loss 0.1092 (0.1092) Prec@1 94.531 (94.531)
Epoch: [137][10/12] Time 0.160 (0.324) Loss 0.2211 (0.1232) Prec@1 93.359 (95.526)
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
* Prec@1 98.193
* Prec@1 98.193
Best accuracy: 98.1927713600986
[validate_2020-03-26-17-26-07] done
[validate_2020-03-26-17-26-07] done
set Type
Number of model parameters: 461559
=> loading checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
=> loaded checkpoint 'output/All/16166_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 2382)
* Prec@1 95.495
* Prec@1 95.495
Best accuracy: 95.49549572460644
[validate_2020-03-26-17-26-14] done
[validate_2020-03-26-17-26-14] done
/home/yh9468/detection/data/Fourth_data/demo Test dir submitted
start test using path : /home/yh9468/detection/data/Fourth_data/demo
Test start
loading checkpoint...
checkpoint already loaded!
start test
data path directory is /home/yh9468/detection/data/Fourth_data/demo
finish test
/home/yh9468/detection/data/Fourth_data/demo/demoset/1-7.bmp test file submitted
start test using path : ('/home/yh9468/detection/data/Fourth_data/demo/demoset/1-7.bmp', 'All Files(*)')
Test start
loading checkpoint...
checkpoint already loaded!
start test
finish test
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
* Prec@1 98.193
* Prec@1 98.193
Best accuracy: 98.1927713600986
[validate_2020-03-27-11-52-28] done
[validate_2020-03-27-11-52-28] done
start test using path : ../data/Fourth_data/demo
Test start
loading checkpoint...
checkpoint already loaded!
start test
data path directory is ../data/Fourth_data/demo
Inference time 21 images : 0.37514
finish test
set Type
start test using path : ../data/Fourth_data/demo
Test start
loading checkpoint...
checkpoint already loaded!
start test
data path directory is ../data/Fourth_data/demo
Inference time 21 images : 0.7917
finish test
/home/yh9468/detection/data/Fourth_data/demo/demoset/1-1.bmp test file submitted
start test using path : ('/home/yh9468/detection/data/Fourth_data/demo/demoset/1-1.bmp', 'All Files(*)')
Test start
loading checkpoint...
checkpoint already loaded!
start test
Inference time 1 image : 0.03704
finish test
using user's checkpoint ('E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar', 'All Files(*)')
Number of model parameters: 154706
=> loading checkpoint '('E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar', 'All Files(*)')'
Fatal error in main loop
Traceback (most recent call last):
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 289, in _check_seekable
f.seek(f.tell())
AttributeError: 'tuple' object has no attribute 'seek'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "E:\code\detection\trainer\test.py", line 256, in main
run_model(args, q)
File "E:\code\detection\trainer\test.py", line 328, in run_model
checkpoint = torch.load(args['checkpoint'])
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 525, in load
with _open_file_like(f, 'rb') as opened_file:
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 217, in _open_file_like
return _open_buffer_reader(name_or_buffer)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 202, in __init__
_check_seekable(buffer)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 292, in _check_seekable
raise_err_msg(["seek", "tell"], e)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 285, in raise_err_msg
raise type(e)(msg)
AttributeError: 'tuple' object has no attribute 'seek'. You can only torch.load from a file that is seekable. Please pre-load the data into a buffer like io.BytesIO and try to load from it instead.
[validate_2020-03-31-14-53-49] failed
using user's checkpoint E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar
Number of model parameters: 154706
=> loading checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
Fatal error in main loop
Traceback (most recent call last):
File "E:\code\detection\trainer\test.py", line 261, in main
run_model(args, q)
File "E:\code\detection\trainer\test.py", line 333, in run_model
checkpoint = torch.load(args['checkpoint'])
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 529, in load
return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 702, in _legacy_load
result = unpickler.load()
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 665, in persistent_load
deserialized_objects[root_key] = restore_location(obj, location)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 156, in default_restore_location
result = fn(storage, location)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 132, in _cuda_deserialize
device = validate_cuda_device(location)
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\serialization.py", line 116, in validate_cuda_device
raise RuntimeError('Attempting to deserialize object on a CUDA '
RuntimeError: Attempting to deserialize object on a CUDA device but torch.cuda.is_available() is False. If you are running on a CPU-only machine, please use torch.load with map_location=torch.device('cpu') to map your storages to the CPU.
[validate_2020-03-31-14-58-23] failed
using user's checkpoint E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar
Number of model parameters: 154706
=> loading checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
* Prec@1 91.867
* Prec@1 91.867
Best accuracy: 95.90643257007264
using user's checkpoint E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar
Number of model parameters: 154706
=> loading checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'E:/code/detection/trainer/output/Error/47098_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/2] Time 23.714 (23.714) Loss 0.1847 (0.1847) Prec@1 92.969 (92.969)
Test: [1/2] Time 0.464 (12.089) Loss 0.2262 (0.1942) Prec@1 88.158 (91.867)
* Prec@1 91.867
* Prec@1 91.867
Best accuracy: 95.90643257007264
[validate_2020-03-31-15-08-03] done
[validate_2020-03-31-15-08-03] done
using default checkpoint
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
Fatal error in main loop
Traceback (most recent call last):
File "E:\code\detection\trainer\test.py", line 271, in main
run_model(args, q)
File "E:\code\detection\trainer\test.py", line 359, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "E:\code\detection\trainer\test.py", line 393, in validate
if args['predict']['save']:
KeyError: 'save'
[validate_2020-04-01-18-17-52] failed
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
Test: [0/1] Time 26.222 (26.222) Loss 0.2174 (0.2174) Prec@1 94.823 (94.823)
* Prec@1 94.823
* Prec@1 94.823
Best accuracy: 98.1927713600986
[validate_2020-04-01-18-45-23] done
[validate_2020-04-01-18-45-23] done
start test using path : default checkpoint
Test start
using default checkpoint
loading checkpoint...
checkpoint already loaded!
start test
data path directory is ../data/Fourth_data/demo
Inference time 120 images : 4.358
finish test
start test using path : default checkpoint
Test start
using default checkpoint
loading checkpoint...
[Errno 2] No such file or directory: 'n'
checkpoint already loaded!
start test
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
E:/code/detection/data/Fifth_data/All/Flip/1-1.bmp test file submitted
start test using path : default checkpoint
Test start
using default checkpoint
loading checkpoint...
[Errno 2] No such file or directory: 'E'
checkpoint already loaded!
start test
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
start test using path : default checkpoint
Test start
using default checkpoint
loading checkpoint...
[Errno 2] No such file or directory: 'E'
checkpoint already loaded!
start test
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
using default checkpoint
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
Test: [0/1] Time 27.498 (27.498) Loss 0.2174 (0.2174) Prec@1 94.823 (94.823)
* Prec@1 94.823
* Prec@1 94.823
Best accuracy: 98.1927713600986
[validate_2020-04-01-20-18-55] done
[validate_2020-04-01-20-18-55] done
using default checkpoint
Number of model parameters: 400114
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
Fatal error in main loop
Traceback (most recent call last):
File "E:\code\detection\trainer\test.py", line 274, in main
run_model(args, q)
File "E:\code\detection\trainer\test.py", line 351, in run_model
model.load_state_dict(checkpoint['state_dict'])
File "C:\Users\younho\AppData\Local\Programs\Python\Python36\lib\site-packages\torch\nn\modules\module.py", line 830, in load_state_dict
self.__class__.__name__, "\n\t".join(error_msgs)))
RuntimeError: Error(s) in loading state_dict for DataParallel:
Missing key(s) in state_dict: "module.features.5.conv.0.weight", "module.features.5.conv.1.weight", "module.features.5.conv.1.bias", "module.features.5.conv.1.running_mean", "module.features.5.conv.1.running_var", "module.features.5.conv.3.weight", "module.features.5.conv.4.weight", "module.features.5.conv.4.bias", "module.features.5.conv.4.running_mean", "module.features.5.conv.4.running_var", "module.features.5.conv.5.fc.0.weight", "module.features.5.conv.5.fc.2.weight", "module.features.5.conv.7.weight", "module.features.5.conv.8.weight", "module.features.5.conv.8.bias", "module.features.5.conv.8.running_mean", "module.features.5.conv.8.running_var", "module.features.6.0.weight", "module.features.6.1.weight", "module.features.6.1.bias", "module.features.6.1.running_mean", "module.features.6.1.running_var", "module.features.8.weight", "module.features.8.bias".
Unexpected key(s) in state_dict: "module.features.5.0.weight", "module.features.5.1.weight", "module.features.5.1.bias", "module.features.5.1.running_mean", "module.features.5.1.running_var", "module.features.5.1.num_batches_tracked", "module.features.7.weight", "module.features.7.bias".
[validate_2020-04-01-20-21-14] failed
Number of model parameters: 400114
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
printlog() missing 2 required positional arguments: 'logger' and 'q'
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
using default checkpoint
Number of model parameters: 154706
=> loading checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/Error/12613_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2877)
Test: [0/1] Time 26.242 (26.242) Loss 0.2174 (0.2174) Prec@1 94.823 (94.823)
* Prec@1 94.823
* Prec@1 94.823
Best accuracy: 98.1927713600986
[validate_2020-04-01-22-45-31] done
[validate_2020-04-01-22-45-31] done
set Type
start test using path : ../data/Fourth_data/demo
val start
using default checkpoint
Number of model parameters: 461559
=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
printlog() missing 2 required positional arguments: 'logger' and 'q'
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 68)
Fatal error in main loop
Traceback (most recent call last):
File "eval_binary_model.py", line 67, in main
run_model(args)
File "eval_binary_model.py", line 132, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args)
File "eval_binary_model.py", line 172, in validate
save_error_case(output.data, target, args, topk=(1,), input=input, save_correct=False)
File "eval_binary_model.py", line 245, in save_error_case
os.mkdir(f"eval_results/{args['task']}/error_case")
FileNotFoundError: [Errno 2] No such file or directory: 'eval_results/ErrorType/error_case'
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 68)
Test: [0/3] Time 1.011 (1.011) Loss 0.0242 (0.0242) Prec@1 99.219 (99.219)
* Prec@1 98.519
Best accuracy: 98.51851829246239
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/65014_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/65014_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 48)
Test: [0/3] Time 1.028 (1.028) Loss 0.0223 (0.0223) Prec@1 98.828 (98.828)
* Prec@1 97.037
Best accuracy: 98.14814758300781
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/62034_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/62034_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 122)
Test: [0/3] Time 1.032 (1.032) Loss 0.0093 (0.0093) Prec@1 99.609 (99.609)
* Prec@1 99.074
Best accuracy: 99.07407401756004
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/54623_model=MobilenetV3-ep=3000-block=5/model_best.pth.tar'
Fatal error in main loop
Traceback (most recent call last):
File "eval_binary_model.py", line 67, in main
run_model(args)
File "eval_binary_model.py", line 121, in run_model
model.load_state_dict(checkpoint['state_dict'])
File "/usr/local/lib/python3.6/dist-packages/torch/nn/modules/module.py", line 777, in load_state_dict
self.__class__.__name__, "\n\t".join(error_msgs)))
RuntimeError: Error(s) in loading state_dict for DataParallel:
Missing key(s) in state_dict: "module.features.5.0.weight", "module.features.5.1.weight", "module.features.5.1.bias", "module.features.5.1.running_mean", "module.features.5.1.running_var", "module.features.7.weight", "module.features.7.bias".
Unexpected key(s) in state_dict: "module.features.5.conv.0.weight", "module.features.5.conv.1.weight", "module.features.5.conv.1.bias", "module.features.5.conv.1.running_mean", "module.features.5.conv.1.running_var", "module.features.5.conv.1.num_batches_tracked", "module.features.5.conv.3.weight", "module.features.5.conv.4.weight", "module.features.5.conv.4.bias", "module.features.5.conv.4.running_mean", "module.features.5.conv.4.running_var", "module.features.5.conv.4.num_batches_tracked", "module.features.5.conv.5.fc.0.weight", "module.features.5.conv.5.fc.2.weight", "module.features.5.conv.7.weight", "module.features.5.conv.8.weight", "module.features.5.conv.8.bias", "module.features.5.conv.8.running_mean", "module.features.5.conv.8.running_var", "module.features.5.conv.8.num_batches_tracked", "module.features.6.0.weight", "module.features.6.1.weight", "module.features.6.1.bias", "module.features.6.1.running_mean", "module.features.6.1.running_var", "module.features.6.1.num_batches_tracked", "module.features.8.weight", "module.features.8.bias".
[eval] failed
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/39396_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/39396_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 62)
Test: [0/3] Time 1.031 (1.031) Loss 0.0158 (0.0158) Prec@1 98.828 (98.828)
* Prec@1 98.333
Best accuracy: 98.3333332768193
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/8817_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/8817_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 39)
Test: [0/3] Time 1.031 (1.031) Loss 0.0463 (0.0463) Prec@1 97.266 (97.266)
* Prec@1 95.556
Best accuracy: 98.14814758300781
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/8817_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/8817_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 39)
Test: [0/1] Time 0.926 (0.926) Loss 0.1220 (0.1220) Prec@1 98.148 (98.148)
* Prec@1 98.148
Best accuracy: 98.14814758300781
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/3843_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/3843_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 411)
Test: [0/1] Time 0.525 (0.525) Loss 0.1709 (0.1709) Prec@1 98.148 (98.148)
* Prec@1 98.148
Best accuracy: 98.14814758300781
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/2798_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/2798_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 52)
Test: [0/1] Time 0.910 (0.910) Loss 0.1544 (0.1544) Prec@1 98.148 (98.148)
* Prec@1 98.148
Best accuracy: 98.14814758300781
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/39396_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/39396_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 62)
Test: [0/1] Time 0.917 (0.917) Loss 0.1561 (0.1561) Prec@1 98.148 (98.148)
* Prec@1 98.148
Best accuracy: 98.14814758300781
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/45385_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 68)
Test: [0/1] Time 0.921 (0.921) Loss 0.2315 (0.2315) Prec@1 98.148 (98.148)
* Prec@1 98.148
Best accuracy: 98.14814758300781
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/62034_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/62034_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 122)
Test: [0/1] Time 0.909 (0.909) Loss 0.1854 (0.1854) Prec@1 98.148 (98.148)
* Prec@1 98.148
Best accuracy: 98.14814758300781
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/65014_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/65014_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 48)
Test: [0/1] Time 0.914 (0.914) Loss 0.1541 (0.1541) Prec@1 98.148 (98.148)
* Prec@1 98.148
Best accuracy: 98.14814758300781
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2829)
Test: [0/2] Time 0.719 (0.719) Loss 0.0056 (0.0056) Prec@1 100.000 (100.000)
* Prec@1 99.794
Best accuracy: 99.79423805519387
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2829)
Test: [0/3] Time 0.736 (0.736) Loss 0.1127 (0.1127) Prec@1 99.219 (99.219)
* Prec@1 99.630
Best accuracy: 99.62962962962963
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2829)
Test: [0/1] Time 0.552 (0.552) Loss 0.5223 (0.5223) Prec@1 98.148 (98.148)
* Prec@1 98.148
Best accuracy: 98.14814758300781
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/22101_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/ErrorType/22101_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 1472)
Test: [0/22] Time 0.883 (0.883) Loss 0.1991 (0.1991) Prec@1 95.703 (95.703)
Test: [10/22] Time 0.134 (0.225) Loss 0.1018 (0.1421) Prec@1 97.656 (96.023)
Test: [20/22] Time 0.173 (0.185) Loss 0.2016 (0.1328) Prec@1 94.531 (96.038)
* Prec@1 96.052
Best accuracy: 96.0519101479833
[eval] done
Number of model parameters: 461559
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/48996_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2829)
Test: [0/3] Time 1.110 (1.110) Loss 0.0068 (0.0068) Prec@1 99.609 (99.609)
Test: [1/3] Time 0.131 (0.620) Loss 0.1115 (0.0591) Prec@1 99.609 (99.609)
Test: [2/3] Time 0.036 (0.426) Loss 0.0000 (0.0561) Prec@1 100.000 (99.630)
* Prec@1 99.630
Best accuracy: 99.62962962962963
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/60092_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/ErrorType/60092_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/3] Time 0.769 (0.769) Loss 2.6269 (2.6269) Prec@1 57.031 (57.031)
Test: [1/3] Time 0.117 (0.443) Loss 2.9399 (2.7834) Prec@1 51.953 (54.492)
Test: [2/3] Time 0.038 (0.308) Loss 2.8111 (2.7848) Prec@1 53.571 (54.444)
* Prec@1 54.444
Best accuracy: 98.14814758300781
[eval] done
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/60092_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/ErrorType/60092_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/3] Time 0.919 (0.919) Loss 0.0760 (0.0760) Prec@1 99.609 (99.609)
Test: [1/3] Time 0.151 (0.535) Loss 0.0059 (0.0410) Prec@1 99.609 (99.609)
Test: [2/3] Time 0.052 (0.374) Loss 0.0000 (0.0388) Prec@1 100.000 (99.630)
* Prec@1 99.630
Best accuracy: 99.62962962962963
[eval] done
Number of model parameters: 159830
=> loading checkpoint 'output/ErrorType/51891_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/ErrorType/51891_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 967)
Test: [0/10] Time 1.848 (1.848) Loss 0.0940 (0.0940) Prec@1 96.875 (96.875)
Test: [1/10] Time 0.033 (0.940) Loss 0.1398 (0.1169) Prec@1 96.484 (96.680)
Test: [2/10] Time 0.020 (0.634) Loss 0.0827 (0.1055) Prec@1 98.828 (97.396)
Test: [3/10] Time 0.022 (0.481) Loss 0.0788 (0.0988) Prec@1 98.047 (97.559)
Test: [4/10] Time 0.022 (0.389) Loss 0.0631 (0.0917) Prec@1 98.047 (97.656)
Test: [5/10] Time 0.026 (0.329) Loss 0.0881 (0.0911) Prec@1 97.266 (97.591)
Test: [6/10] Time 0.026 (0.285) Loss 0.1012 (0.0925) Prec@1 97.656 (97.600)
Test: [7/10] Time 0.051 (0.256) Loss 0.0947 (0.0928) Prec@1 96.484 (97.461)
Test: [8/10] Time 0.051 (0.233) Loss 0.1026 (0.0939) Prec@1 96.094 (97.309)
Test: [9/10] Time 0.161 (0.226) Loss 0.0329 (0.0900) Prec@1 99.363 (97.440)
* Prec@1 97.440
Best accuracy: 97.44006518472844
[eval] done
Number of model parameters: 159830
=> loading checkpoint 'output/ErrorType/51891_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'output/ErrorType/51891_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 1013)
Test: [0/10] Time 1.797 (1.797) Loss 0.1185 (0.1185) Prec@1 98.047 (98.047)
Test: [1/10] Time 0.056 (0.926) Loss 0.1336 (0.1260) Prec@1 95.703 (96.875)
Test: [2/10] Time 0.024 (0.625) Loss 0.0722 (0.1081) Prec@1 98.047 (97.266)
Test: [3/10] Time 0.025 (0.475) Loss 0.0833 (0.1019) Prec@1 97.656 (97.363)
Test: [4/10] Time 0.049 (0.390) Loss 0.1214 (0.1058) Prec@1 97.656 (97.422)
Test: [5/10] Time 0.023 (0.329) Loss 0.0616 (0.0984) Prec@1 98.438 (97.591)
Test: [6/10] Time 0.034 (0.287) Loss 0.0906 (0.0973) Prec@1 96.875 (97.489)
Test: [7/10] Time 0.032 (0.255) Loss 0.0922 (0.0967) Prec@1 96.875 (97.412)
Test: [8/10] Time 0.025 (0.229) Loss 0.0631 (0.0929) Prec@1 98.047 (97.483)
Test: [9/10] Time 0.127 (0.219) Loss 0.1669 (0.0977) Prec@1 96.815 (97.440)
* Prec@1 97.440
Best accuracy: 97.4400648933172
[eval] done
Number of model parameters: 159830
=> loading checkpoint 'output/ErrorType/16926_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/16926_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2813)
Test: [0/10] Time 0.771 (0.771) Loss 0.0751 (0.0751) Prec@1 98.828 (98.828)
Test: [1/10] Time 0.016 (0.393) Loss 0.0138 (0.0444) Prec@1 99.609 (99.219)
Test: [2/10] Time 0.009 (0.265) Loss 0.0320 (0.0403) Prec@1 98.438 (98.958)
Test: [3/10] Time 0.009 (0.201) Loss 0.1304 (0.0628) Prec@1 97.656 (98.633)
Test: [4/10] Time 0.008 (0.163) Loss 0.0823 (0.0667) Prec@1 98.828 (98.672)
Test: [5/10] Time 0.009 (0.137) Loss 0.0715 (0.0675) Prec@1 97.266 (98.438)
Test: [6/10] Time 0.008 (0.119) Loss 0.0507 (0.0651) Prec@1 99.609 (98.605)
Test: [7/10] Time 0.009 (0.105) Loss 0.0677 (0.0654) Prec@1 98.047 (98.535)
Test: [8/10] Time 0.009 (0.094) Loss 0.0512 (0.0639) Prec@1 97.656 (98.438)
Test: [9/10] Time 0.055 (0.090) Loss 0.1003 (0.0662) Prec@1 96.815 (98.334)
* Prec@1 98.334
Best accuracy: 98.33401044390638
[eval] done
Number of model parameters: 159830
=> loading checkpoint 'output/ErrorType/16926_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/16926_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 2813)
Test: [0/1] Time 0.756 (0.756) Loss 0.3889 (0.3889) Prec@1 94.309 (94.309)
* Prec@1 94.309
Best accuracy: 94.30894470214844
[eval] done
Number of model parameters: 460278
=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1865)
Test: [0/1] Time 2.244 (2.244) Loss 0.4449 (0.4449) Prec@1 92.952 (92.952)
* Prec@1 92.952
Best accuracy: 92.9515380859375
[eval] done
Number of model parameters: 460278
=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1865)
Test: [0/1] Time 2.112 (2.112) Loss 0.4449 (0.4449) Prec@1 92.952 (92.952)
* Prec@1 92.952
Best accuracy: 92.9515380859375
[eval] done
Number of model parameters: 460278
=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1865)
Test: [0/1] Time 29.993 (29.993) Loss 0.4449 (0.4449) Prec@1 92.952 (92.952)
* Prec@1 92.952
* Prec@1 92.952
Best accuracy: 92.9515380859375
[validate_2020-03-31-16-28-46] done
[validate_2020-03-31-16-28-46] done
set All processing
start test using path : ../data/Fourth_data/demo
Test start
[Errno 2] No such file or directory: 'configs/overall_config.yaml'
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
using default checkpoint
Number of model parameters: 460278
=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1865)
Fatal error in main loop
Traceback (most recent call last):
File "E:\code\detection\trainer\test.py", line 270, in main
run_model(args, q)
File "E:\code\detection\trainer\test.py", line 358, in run_model
prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
File "E:\code\detection\trainer\test.py", line 392, in validate
if args['predict']['save']:
KeyError: 'save'
[validate_2020-03-31-18-52-03] failed
using default checkpoint
Number of model parameters: 461559
=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
printlog() missing 2 required positional arguments: 'logger' and 'q'
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
using default checkpoint
Number of model parameters: 461559
=> loading checkpoint 'output/ErrorType/2715_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
Error(s) in loading state_dict for DataParallel:
size mismatch for module.classifier.1.weight: copying a param with shape torch.Size([6, 1280]) from checkpoint, the shape in current model is torch.Size([7, 1280]).
size mismatch for module.classifier.1.bias: copying a param with shape torch.Size([6]) from checkpoint, the shape in current model is torch.Size([7]).
start test using path : ../data/Fourth_data/demo
val start
using default checkpoint
Number of model parameters: 461559
=> loading checkpoint 'output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
Error(s) in loading state_dict for DataParallel:
Missing key(s) in state_dict: "module.features.5.conv.0.weight", "module.features.5.conv.1.weight", "module.features.5.conv.1.bias", "module.features.5.conv.1.running_mean", "module.features.5.conv.1.running_var", "module.features.5.conv.3.weight", "module.features.5.conv.4.weight", "module.features.5.conv.4.bias", "module.features.5.conv.4.running_mean", "module.features.5.conv.4.running_var", "module.features.5.conv.5.fc.0.weight", "module.features.5.conv.5.fc.2.weight", "module.features.5.conv.7.weight", "module.features.5.conv.8.weight", "module.features.5.conv.8.bias", "module.features.5.conv.8.running_mean", "module.features.5.conv.8.running_var", "module.features.6.conv.0.weight", "module.features.6.conv.1.weight", "module.features.6.conv.1.bias", "module.features.6.conv.1.running_mean", "module.features.6.conv.1.running_var", "module.features.6.conv.3.weight", "module.features.6.conv.4.weight", "module.features.6.conv.4.bias", "module.features.6.conv.4.running_mean", "module.features.6.conv.4.running_var", "module.features.6.conv.5.fc.0.weight", "module.features.6.conv.5.fc.2.weight", "module.features.6.conv.7.weight", "module.features.6.conv.8.weight", "module.features.6.conv.8.bias", "module.features.6.conv.8.running_mean", "module.features.6.conv.8.running_var", "module.features.7.0.weight", "module.features.7.1.weight", "module.features.7.1.bias", "module.features.7.1.running_mean", "module.features.7.1.running_var", "module.features.9.weight", "module.features.9.bias".
Unexpected key(s) in state_dict: "module.features.5.0.weight", "module.features.5.1.weight", "module.features.5.1.bias", "module.features.5.1.running_mean", "module.features.5.1.running_var", "module.features.5.1.num_batches_tracked", "module.features.7.weight", "module.features.7.bias".
Number of model parameters: 461559
=> loading checkpoint 'output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
Error(s) in loading state_dict for DataParallel:
Missing key(s) in state_dict: "module.features.5.conv.0.weight", "module.features.5.conv.1.weight", "module.features.5.conv.1.bias", "module.features.5.conv.1.running_mean", "module.features.5.conv.1.running_var", "module.features.5.conv.3.weight", "module.features.5.conv.4.weight", "module.features.5.conv.4.bias", "module.features.5.conv.4.running_mean", "module.features.5.conv.4.running_var", "module.features.5.conv.5.fc.0.weight", "module.features.5.conv.5.fc.2.weight", "module.features.5.conv.7.weight", "module.features.5.conv.8.weight", "module.features.5.conv.8.bias", "module.features.5.conv.8.running_mean", "module.features.5.conv.8.running_var", "module.features.6.conv.0.weight", "module.features.6.conv.1.weight", "module.features.6.conv.1.bias", "module.features.6.conv.1.running_mean", "module.features.6.conv.1.running_var", "module.features.6.conv.3.weight", "module.features.6.conv.4.weight", "module.features.6.conv.4.bias", "module.features.6.conv.4.running_mean", "module.features.6.conv.4.running_var", "module.features.6.conv.5.fc.0.weight", "module.features.6.conv.5.fc.2.weight", "module.features.6.conv.7.weight", "module.features.6.conv.8.weight", "module.features.6.conv.8.bias", "module.features.6.conv.8.running_mean", "module.features.6.conv.8.running_var", "module.features.7.0.weight", "module.features.7.1.weight", "module.features.7.1.bias", "module.features.7.1.running_mean", "module.features.7.1.running_var", "module.features.9.weight", "module.features.9.bias".
Unexpected key(s) in state_dict: "module.features.5.0.weight", "module.features.5.1.weight", "module.features.5.1.bias", "module.features.5.1.running_mean", "module.features.5.1.running_var", "module.features.5.1.num_batches_tracked", "module.features.7.weight", "module.features.7.bias".
using default checkpoint
Number of model parameters: 161111
=> loading checkpoint 'output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar'
=> loaded checkpoint 'output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/model_best.pth.tar' (epoch 1884)
Test: [0/1] Time 25.026 (25.026) Loss 0.2817 (0.2817) Prec@1 95.918 (95.918)
* Prec@1 95.918
* Prec@1 95.918
Best accuracy: 95.91837310791016
[validate_2020-04-01-22-59-05] done
[validate_2020-04-01-22-59-05] done
set All processing
start test using path : ../data/Fourth_data/demo
val start
using default checkpoint
Number of model parameters: 462840
=> loading checkpoint 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar'
=> loaded checkpoint 'output/All/14835_model=MobilenetV3-ep=3000-block=6/model_best.pth.tar' (epoch 1617)
Test: [0/3] Time 31.288 (31.288) Loss 0.2660 (0.2660) Prec@1 95.703 (95.703)
Test: [1/3] Time 7.587 (19.437) Loss 0.3209 (0.2934) Prec@1 95.312 (95.508)
Test: [2/3] Time 6.625 (15.167) Loss 0.1835 (0.2602) Prec@1 96.396 (95.777)
* Prec@1 95.777
* Prec@1 95.777
Best accuracy: 95.77656669512757
[validate_2020-04-01-23-00-04] done
[validate_2020-04-01-23-00-04] done
set error
Test를 수행하기 위해 데이터를 입력해 주세요.
Test를 수행하기 위해 데이터를 입력해 주세요.
using user's checkpoint E:/code/detection/trainer/output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar
Number of model parameters: 161111
=> loading checkpoint 'E:/code/detection/trainer/output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar'
=> loaded checkpoint 'E:/code/detection/trainer/output/ErrorType/85804_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar' (epoch 3000)
Test: [0/2] Time 26.118 (26.118) Loss 0.2720 (0.2720) Prec@1 93.359 (93.359)
Test: [1/2] Time 0.848 (13.483) Loss 0.4744 (0.3686) Prec@1 91.453 (92.449)
* Prec@1 92.449
* Prec@1 92.449
Best accuracy: 95.91836762720224
[validate_2020-04-03-17-24-24] done
[validate_2020-04-03-17-24-24] done
set All processing
E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar checkpoint file submitted
E:/code/detection/data/Fifth_data/All Test dir submitted
val start
using user's checkpoint E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
Number of model parameters: 462840
=> loading checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar'
=> loaded checkpoint 'E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar' (epoch 3000)
Test: [0/3] Time 32.591 (32.591) Loss 0.1575 (0.1575) Prec@1 94.531 (94.531)
Test: [1/3] Time 8.179 (20.385) Loss 0.2475 (0.2025) Prec@1 93.750 (94.141)
Test: [2/3] Time 7.374 (16.048) Loss 0.4568 (0.2794) Prec@1 94.595 (94.278)
* Prec@1 94.278
* Prec@1 94.278
Best accuracy: 96.04904700754774
[validate_2020-04-03-17-39-50] done
[validate_2020-04-03-17-39-50] done
E:/code/detection/data/Fifth_data/All/Empty/1-5.bmp test file submitted
Test start
start test using path : E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
using user's checkpoint E:/code/detection/trainer/output/All/14835_model=MobilenetV3-ep=3000-block=6/checkpoint.pth.tar
loading checkpoint...
checkpoint already loaded!
start test
single_file_test() missing 1 required positional argument: 'q'
실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.
import argparse
import random
import os
import cv2
import logging
import datetime
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torchvision.utils import save_image
from model import mobilenetv3
from utils import get_args_from_yaml, MyImageFolder
from get_mean_std import get_params
## 해당 코드는 전체 inference를 모두 담은 code.
# make Logger
logger = logging.getLogger(os.path.dirname(__name__))
logger.setLevel(logging.INFO)
# make Logger stream
streamHandler = logging.StreamHandler()
logger.addHandler(streamHandler)
# Pre-create the per-class output directories that run() writes into.
# os.makedirs(..., exist_ok=True) also creates the missing 'eval_results'
# parent directory (the original os.mkdir chain raised FileNotFoundError
# when it did not exist) and is idempotent across runs.
for _cls in ('Normal', 'Crack', 'Empty', 'Flip', 'Pollute', 'Double', 'Leave', 'Scratch'):
    os.makedirs(os.path.join('eval_results', 'main', _cls), exist_ok=True)
def main(Error_args, Error_Type_args):
    """Attach a per-run log file to the module logger, then run inference.

    Args:
        Error_args: parsed YAML config for the binary Error/Normal model.
        Error_Type_args: parsed YAML config for the 7-way error-type model.
    """
    logdir = "logs/main/"
    # makedirs also creates a missing 'logs' parent; the old os.mkdir crashed then.
    os.makedirs(logdir, exist_ok=True)
    # BUG FIX: ':' is not a legal filename character on Windows (this project
    # runs from E:/...), so the timestamp uses '-' separators throughout.
    stamp = datetime.datetime.now().strftime('%Y%m%d-%H-%M-%S')
    fileHander = logging.FileHandler(logdir + f"{stamp}_log.log")
    logger.addHandler(fileHander)
    run(Error_args, Error_Type_args)
def run(Error_args, Error_Type_args):
    """Run the two-stage defect pipeline over the configured test folder.

    Stage 1 (Error_model) splits each image into Normal vs. Error; Normal
    images are copied to eval_results/main/Normal immediately.  Stage 2
    (Error_Type_model) classifies every Error image into one of seven defect
    types and copies it into the matching eval_results/main/<type> directory.

    Args:
        Error_args: YAML config dict for the binary model; also supplies gpu
            ids, data paths and predict batch settings used here.
        Error_Type_args: YAML config dict for the 7-way type model.
    """
    # Checkpoints are pinned to the snapshots this pipeline was evaluated with.
    Error_args['checkpoint'] = "output/Error/25678_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar"
    Error_Type_args['checkpoint'] = "output/ErrorType/2798_model=MobilenetV3-ep=3000-block=4/checkpoint.pth.tar"

    Error_model = mobilenetv3(n_class=Error_args['model']['class'], blocknum=Error_args['model']['blocks'])
    Error_Type_model = mobilenetv3(n_class=Error_Type_args['model']['class'], blocknum=Error_Type_args['model']['blocks'])

    gpus = Error_args['gpu']
    resize_size = Error_args['train']['size']
    torch.cuda.set_device(gpus[0])
    with torch.cuda.device(gpus[0]):
        Error_model = Error_model.cuda()
        Error_Type_model = Error_Type_model.cuda()
    # The checkpoints were saved from DataParallel models ('module.'-prefixed
    # state-dict keys), so wrap before loading.
    Error_model = torch.nn.DataParallel(Error_model, device_ids=gpus, output_device=gpus[0])
    Error_Type_model = torch.nn.DataParallel(Error_Type_model, device_ids=gpus, output_device=gpus[0])
    Error_model.load_state_dict(torch.load(Error_args['checkpoint'])['state_dict'])
    Error_Type_model.load_state_dict(torch.load(Error_Type_args['checkpoint'])['state_dict'])

    # Single-channel normalisation statistics computed from the test set itself.
    mean, std = get_params(Error_args['data']['test'], resize_size)
    transform = transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.Grayscale(),
        transforms.ToTensor(),
        transforms.Normalize(mean=[mean[0].item()], std=[std[0].item()]),
    ])

    dataset = MyImageFolder(Error_args['data']['test'], transform)
    print(len(dataset))
    loader = torch.utils.data.DataLoader(
        dataset, batch_size=Error_args['predict']['batch-size'], shuffle=False,
        num_workers=Error_args['predict']['worker'], pin_memory=True
    )

    # Index order must match the ErrorType model's training folders
    # (alphabetical) — presumably ImageFolder ordering; TODO confirm.
    type_classes = ['Crack', 'Double', 'Empty', 'Flip', 'Leave', 'Pollute', 'Scratch']

    for data in loader:
        (input, _), (path, _) = data
        input = input.cuda()

        # ---- stage 1: Error (label 0) vs. Normal ----
        _, output = Error_model(input).topk(1, 1, True, True)
        error_tensors = []  # inputs routed to stage 2
        new_paths = []      # their source paths, kept in the same order
        normal = 0
        for idx in range(input.shape[0]):
            if output[idx] == 0:  # error case -> defer to the type model
                error_tensors.append(input[idx:idx + 1])
                new_paths.append(path[idx])
            else:                 # normal case -> copy straight to the output dir
                img = cv2.imread(path[idx])
                cv2.imwrite(f"eval_results/main/Normal/{path[idx].split('/')[-1]}", img)
                normal += 1
        print(f"error path : {len(new_paths)}")
        print(f"error : {len(error_tensors)}")
        print(f"normal : {normal}")

        if not error_tensors:
            # No error cases in this batch: skip stage 2 instead of running the
            # model on an empty batch (the old placeholder-row trick).
            continue
        # Building the batch from a list works for any resize_size; the old
        # code hard-coded a torch.ones((1,1,64,64)) placeholder row.
        error_cases = torch.cat(error_tensors, dim=0)
        print(error_cases.shape[0])

        # ---- stage 2: 7-way defect type ----
        _, output = Error_Type_model(error_cases).topk(1, 1, True, True)
        for idx in range(error_cases.shape[0]):
            label = int(output[idx])
            # Guard mirrors the original if/elif chain, which silently
            # dropped any prediction outside 0..6.
            if 0 <= label < len(type_classes):
                img = cv2.imread(new_paths[idx])
                cv2.imwrite(f"eval_results/main/{type_classes[label]}/{new_paths[idx].split('/')[-1]}", img)
if __name__ == '__main__':
    # Load both model configs from YAML, then run the full two-stage pipeline.
    Error_args = get_args_from_yaml("configs/Error_config.yml")
    Error_Type_args = get_args_from_yaml("configs/ErrorType_config.yml")
    main(Error_args, Error_Type_args)
\ No newline at end of file
import torch
import torch.nn as nn
from model import mobilenetv3
import argparse
import torchvision
from torchvision.transforms import transforms
import torchvision.datasets as datasets
from augmentations import RandAugment
from get_mean_std import get_params
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
import os
import cv2
from utils import MyImageFolder
class ConcatDataset(torch.utils.data.Dataset):
    """Zip several datasets into one.

    Item ``i`` is a tuple holding one sample from each wrapped dataset;
    shorter datasets wrap around modulo their own length.  The combined
    length is that of the longest dataset.
    """

    def __init__(self, *datasets):
        self.datasets = datasets

    def __getitem__(self, i):
        return tuple(ds[i % len(ds)] for ds in self.datasets)

    def __len__(self):
        return max(len(ds) for ds in self.datasets)
def make_dir(base='../data/Fourth_data/teacher_data'):
    """Create the per-class folders the teacher writes pseudo-labelled images into.

    Args:
        base: root directory that receives one sub-folder per class.  The
            default preserves the original hard-coded location, so existing
            callers (``make_dir()``) are unchanged.
    """
    # makedirs(exist_ok=True) is idempotent and also creates a missing base
    # directory, which the original exists/mkdir pairs could not.
    for cls in ('Double', 'Flip', 'Scratch', 'Leave', 'Normal', 'Empty'):
        os.makedirs(os.path.join(base, cls), exist_ok=True)
# ---- CLI arguments for the noisy-student training run ----------------------
parser = argparse.ArgumentParser(description='Process make noisy student model')
parser.add_argument('--checkpoint_path', type=str, help='checkpoint path')
parser.add_argument('--size', type=int, help='resize integer of input')
parser.add_argument('--batch_size', type=int, default=256,help='set batch size')
parser.add_argument('--teacher_checkpoint_path', type=str, help='teacher first checkpoint path')
parser.add_argument('--Labeled_dataset_path', default='../data/Fourth_data/noisy_data/Labeled', type=str, help='path of dataset')
parser.add_argument('--Unlabeled_dataset_path', default='../data/Fourth_data/noisy_data/Unlabeled', type=str, help='path of unlabeled dataset')
parser.add_argument('--num_workers', default=8, type=int, help="number of gpu worker")
parser.add_argument('--epochs', default=350, type=int, help='epoch')
parser.add_argument('--finetune_epochs', default=2, type=int, help='finetuning epochs')
parser.add_argument('--data_save_path', default='../data/Fourth_data/teacher_data', type=str, help='teacher save unlabeled data in this path')
args = parser.parse_args()
print(args)
# RandAugment N/M values, by paper of https://arxiv.org/pdf/1911.04252.pdf
Aug_number = 2
Aug_magnitude = 27
# my customized network: MobileNetV3 block counts to sweep for the student
blocks = [4,5,6,7,8]
# data loader parameters
kwargs = {'num_workers': args.num_workers, 'pin_memory': True}
# Per-split normalisation statistics computed from the data itself.
Labeled_mean, Labeled_std = get_params(args.Labeled_dataset_path, args.size)
Unlabeled_mean, Unlabeled_std = get_params(args.Unlabeled_dataset_path, args.size)
transform_labeled = transforms.Compose([
    transforms.Resize((args.size, args.size)),
    transforms.RandomCrop(args.size, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(Labeled_mean[0].item(), Labeled_std[0].item())
])
# The teacher consumes raw unlabeled data to predict labels, so no extra
# (RandAugment) augmentation is inserted for this transform.
transform_unlabeled = transforms.Compose([
    transforms.Resize((args.size, args.size)),
    transforms.RandomCrop(args.size, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(Unlabeled_mean[0].item(), Unlabeled_std[0].item())
])
# Add RandAugment with N, M(hyperparameter) to the labeled pipeline only.
transform_labeled.transforms.insert(0, RandAugment(Aug_number, Aug_magnitude))
# set dataset
Labeled_dataset = datasets.ImageFolder(args.Labeled_dataset_path, transform_labeled)
Unlabeled_dataset = MyImageFolder(args.Unlabeled_dataset_path, transform_unlabeled)
labeled_data_loader = torch.utils.data.DataLoader(
    Labeled_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
unlabeled_data_loader = torch.utils.data.DataLoader(
    Unlabeled_dataset, batch_size=args.batch_size, shuffle=True, **kwargs)
# The noisy teacher is configured smaller than the student, with dropout 0.
noisy_teacher_model = mobilenetv3(n_class=2, dropout=0.0, blocknum=4)
checkpoint = torch.load(args.teacher_checkpoint_path)
noisy_teacher_model.load_state_dict(checkpoint['state_dict'])
# make loss function
criterion = nn.CrossEntropyLoss()
# make class directory, then derive the class list from the folder names
# (sorted so indices match ImageFolder's alphabetical label order).
make_dir()
classes = os.listdir(args.data_save_path)
classes.sort()
# ---- iterative noisy-student training over increasing student depths -------
for block in blocks:
    # The student is larger than the teacher; dropout 0.5 and stochastic depth
    # follow the Noisy Student paper.
    noisy_student_model = mobilenetv3(n_class=2, dropout=0.5, blocknum=block, stochastic=True)
    noisy_student_model.cuda()
    noisy_teacher_model.cuda()
    criterion.cuda()
    # Official schedule: RMSprop, lr 0.128, decayed 0.97 every 2.4 epochs
    # (approximated below as gamma 0.7 every 2 epochs).
    optimizer = torch.optim.RMSprop(noisy_student_model.parameters(), lr=0.128, weight_decay=0.9, momentum=0.9)
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, 0.7)
    for epoch in range(args.epochs):
        # 1) Pseudo-label the unlabeled pool with the current teacher.
        #    no_grad: pure inference, keeps activations off the autograd tape.
        with torch.no_grad():
            for data in unlabeled_data_loader:
                (unlabeled_input, _), (path, _) = data
                unlabeled_input = unlabeled_input.cuda()
                output = noisy_teacher_model(unlabeled_input)
                # BUG FIX: the original called F.softmax but never imported F;
                # nn.functional is reachable through the existing nn import.
                prob = nn.functional.softmax(output, dim=1)
                for idx, p in enumerate(prob):
                    indices = torch.topk(p, 1).indices.tolist()
                    img = cv2.imread(path[idx])
                    cv2.imwrite(f"{args.data_save_path}/{classes[indices[0]]}/{path[idx].split('/')[-1]}", img)
        # 2) Rebuild a dataset over the freshly teacher-labelled images.
        transform_teacher_data = transforms.Compose([
            transforms.Resize((args.size, args.size)),
            transforms.RandomCrop(args.size, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(Unlabeled_mean[0].item(), Unlabeled_std[0].item())
        ])
        transform_teacher_data.transforms.insert(0, RandAugment(Aug_number, Aug_magnitude))
        teacher_data = datasets.ImageFolder(args.data_save_path, transform_teacher_data)
        # BUG FIX: ConcatDataset must zip Datasets, not DataLoader objects.
        # Each item is ((teacher_img, teacher_label), (labeled_img, label)).
        merged_dataset = ConcatDataset(teacher_data, Labeled_dataset)  # teacher predictions first, real data second
        merged_data_loader = torch.utils.data.DataLoader(
            merged_dataset, batch_size=args.batch_size, shuffle=True, num_workers=8, pin_memory=True)
        # 3) Train the student on pseudo-labelled + labelled data together.
        #    Hard pseudo-labels for now; TODO: soft labeling as in the paper.
        for (t_input, t_target), (l_input, l_target) in merged_data_loader:
            input = torch.cat((t_input, l_input), dim=0).cuda()
            target = torch.cat((t_target, l_target), dim=0).cuda()
            output = noisy_student_model(input)
            # BUG FIX: CrossEntropyLoss expects (logits, target); the original
            # passed them reversed.
            loss = criterion(output, target)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # The paper decays every 2.4 epochs; every 2 epochs here.
        if epoch % 2 == 0:
            scheduler.step()
    # Iterative self-training: the trained student becomes the next teacher.
    noisy_teacher_model = noisy_student_model
\ No newline at end of file
from model import mobilenetv3
import torch
import torch.nn as nn
# Export the trained 8-class MobileNetV3 (6 blocks) to TorchScript for CPU use.
model = mobilenetv3(n_class=8, blocknum=6)
# The checkpoint was saved from a DataParallel wrapper ('module.'-prefixed
# keys), so load through one even on CPU.
model = torch.nn.DataParallel(model)
device = torch.device('cpu')
checkpoint = torch.load('output/All/48860_model=MobilenetV3-ep=3000-block=6-class=8/model_best.pth.tar', map_location = device)
model.load_state_dict(checkpoint['state_dict'])
model.to(device)
model.eval()
# Example input for tracing: batch of 256 single-channel 224x224 images.
x = torch.randn(256,1,224,224)
print(x.shape)
# Trace the unwrapped module (model.module) so the saved graph carries no
# DataParallel layer.
jit_model = torch.jit.trace(model.module,x)
jit_model.save("mobilenetv3.pt")
#check jitModel is working
#output = jit_model(torch.ones(3,1,224,224))
#print(output)
\ No newline at end of file
import torch
import torch.nn as nn
from torch.nn import functional as F
import math
from utils import stochastic_depth
############# Mobile Net V3 #############
def make_divisible(x, divisible_by=8):
    """Round *x* up to the nearest multiple of *divisible_by*.

    Uses math.ceil (math is already imported at module level) instead of the
    original's per-call ``import numpy`` — same result, no import overhead.
    """
    return int(math.ceil(x / divisible_by) * divisible_by)
def conv_bn(inp, oup, stride, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
    """3x3 convolution (padding 1, no bias) -> batch norm -> nonlinearity."""
    layers = [
        conv_layer(inp, oup, 3, stride, 1, bias=False),
        norm_layer(oup),
        nlin_layer(inplace=True),
    ]
    return nn.Sequential(*layers)
# without bn
def conv_block(inp, oup, stride, conv_layer=nn.Conv2d, nlin_layer=nn.LeakyReLU):
    """3x3 convolution (no batch norm) followed by a leaky nonlinearity.

    The nonlinearity is constructed as ``nlin_layer(0.1, inplace=True)``, so it
    must accept a leading slope argument.  BUG FIX: the old default of
    ``nn.ReLU`` could never work — ReLU has no slope parameter, so the call
    raised a TypeError.  Every caller in this file passes ``nn.LeakyReLU``,
    which is now the default; explicit callers are unaffected.
    """
    return nn.Sequential(
        conv_layer(inp, oup, 3, stride, 1, bias=False),
        nlin_layer(0.1, inplace=True)
    )
def trans_conv_block(inp, oup, stride, conv_layer=nn.ConvTranspose2d, nlin_layer=nn.LeakyReLU):
    """Transposed 2x2/stride-2 convolution (doubles H and W), then a leaky
    nonlinearity.

    ``stride`` is accepted for signature parity with conv_block but unused —
    the kernel/stride pair is fixed at (2, 2).  BUG FIX: default nonlinearity
    changed to LeakyReLU for the same reason as conv_block (nn.ReLU cannot
    accept the 0.1 slope argument).
    """
    return nn.Sequential(
        conv_layer(inp, oup, 2, 2),  # doubles the spatial resolution
        nlin_layer(0.1, inplace=True)
    )
def conv_1x1_bn(inp, oup, conv_layer=nn.Conv2d, norm_layer=nn.BatchNorm2d, nlin_layer=nn.ReLU):
    """Pointwise (1x1) convolution -> batch norm -> nonlinearity."""
    pointwise = conv_layer(inp, oup, 1, 1, 0, bias=False)
    return nn.Sequential(pointwise, norm_layer(oup), nlin_layer(inplace=True))
class Hswish(nn.Module):
    """Hard swish activation: x * relu6(x + 3) / 6 (used by MobileNetV3)."""

    def __init__(self, inplace=True):
        super(Hswish, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        gate = F.relu6(x + 3., inplace=self.inplace)
        return x * gate / 6.
class Hsigmoid(nn.Module):
    """Hard sigmoid: relu6(x + 3) / 6, a piecewise-linear sigmoid surrogate."""

    def __init__(self, inplace=True):
        super(Hsigmoid, self).__init__()
        self.inplace = inplace

    def forward(self, x):
        shifted = F.relu6(x + 3., inplace=self.inplace)
        return shifted / 6.
class SEModule(nn.Module):
    """Squeeze-and-Excitation: rescale each channel by a gated global descriptor.

    Global-average-pool to one value per channel, pass through a two-layer
    bottleneck MLP gated by a hard sigmoid, then multiply back onto the input.
    """

    def __init__(self, channel, reduction=4):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        squeezed = channel // reduction
        self.fc = nn.Sequential(
            nn.Linear(channel, squeezed, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(squeezed, channel, bias=False),
            Hsigmoid()
            # nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels, _, _ = x.size()
        weights = self.avg_pool(x).view(batch, channels)
        weights = self.fc(weights).view(batch, channels, 1, 1)
        return x * weights.expand_as(x)
class Identity(nn.Module):
    """No-op module; stands in for SEModule when SE is disabled."""

    def __init__(self, channel):
        # `channel` is accepted only for signature parity with SEModule.
        super(Identity, self).__init__()

    def forward(self, x):
        return x
class MobileBottleneck(nn.Module):
    """MobileNetV3 inverted-residual block.

    Pointwise expand -> depthwise conv -> optional Squeeze-Excitation ->
    pointwise (linear) project, with a residual connection when the stride is
    1 and the channel count is unchanged.  Optionally applies stochastic
    depth to the conv branch during training.
    """

    def __init__(self, inp, oup, kernel, stride, exp, se=False, nl='RE', stochastic=False, block_ratio=None):
        # inp/oup: input/output channels; kernel: 3 or 5; exp: expansion width;
        # se: insert a Squeeze-Excitation layer; nl: 'RE' (ReLU) or 'HS' (hard swish);
        # stochastic/block_ratio: enable stochastic depth with a depth-scaled rate.
        super(MobileBottleneck, self).__init__()
        assert stride in [1, 2]
        assert kernel in [3, 5]
        padding = (kernel - 1) // 2
        # Residual connection only when the block preserves shape.
        self.use_res_connect = stride == 1 and inp == oup
        self.use_stochastic = stochastic
        self.block_ratio = block_ratio
        conv_layer = nn.Conv2d
        norm_layer = nn.BatchNorm2d
        if nl == 'RE':
            nlin_layer = nn.ReLU # or ReLU6
        elif nl == 'HS':
            nlin_layer = Hswish
        else:
            raise NotImplementedError
        if se:
            SELayer = SEModule
        else:
            SELayer = Identity
        self.conv = nn.Sequential(
            # pw
            conv_layer(inp, exp, 1, 1, 0, bias=False),
            norm_layer(exp),
            nlin_layer(inplace=True),
            # dw
            conv_layer(exp, exp, kernel, stride, padding, groups=exp, bias=False),
            norm_layer(exp),
            SELayer(exp),
            nlin_layer(inplace=True),
            # pw-linear
            conv_layer(exp, oup, 1, 1, 0, bias=False),
            norm_layer(oup),
        )

    def forward(self, x):
        # stochastic_depth (project util) drops/scales the conv branch during
        # training; the drop probability grows linearly with block depth
        # (0.2 * block_ratio).
        if self.use_res_connect:
            if self.use_stochastic:
                return x + stochastic_depth(self.conv(x),self.training, 0.2 * self.block_ratio)
            else:
                return x + self.conv(x)
        else:
            if self.use_stochastic:
                return stochastic_depth(self.conv(x), self.training, 0.2 * self.block_ratio)
            else:
                return self.conv(x)
class MobileNetV3(nn.Module):
    """MobileNetV3-small style classifier with a configurable depth.

    Only the first ``blocknum`` rows of the stage table are instantiated, so
    the network can be scaled down for the small defect images used here.
    The stem takes single-channel (grayscale) input; ``input_size`` must be a
    multiple of 32.
    """

    def __init__(self, n_class, input_size=64, dropout=0.8, width_mult=1.0, blocknum=4, stochastic=False):
        super(MobileNetV3, self).__init__()
        input_channel = 16
        last_channel = 1280
        # Full MobileNetV3-small stage table; one row per bottleneck block.
        self.mobileblock = [
            # k, exp, c, se, nl, s,
            [3, 16, 16, True, 'RE', 2],
            [3, 72, 24, False, 'RE', 2],
            [3, 88, 24, False, 'RE', 1],
            [5, 96, 40, True, 'HS', 2],
            [5, 240, 40, True, 'HS', 1],
            [5, 240, 40, True, 'HS', 1],
            [5, 120, 48, True, 'HS', 1],
            [5, 144, 48, True, 'HS', 1],
            [5, 288, 96, True, 'HS', 2],
            [5, 576, 96, True, 'HS', 1],
            [5, 576, 96, True, 'HS', 1],
        ]
        # Truncate the table to the requested depth.
        mobile_setting = [
            self.mobileblock[idx] for idx in range(blocknum)
        ]
        # Expansion width of the last kept block feeds the 1x1 head conv.
        self.last_exp = self.mobileblock[blocknum-1][1]
        # building first layer
        assert input_size % 32 == 0
        last_channel = make_divisible(last_channel * width_mult) if width_mult > 1.0 else last_channel
        self.features = [conv_bn(1, input_channel, 2, nlin_layer=Hswish)] # stem changed to 1 input channel (grayscale)
        self.classifier = []
        # building mobile blocks; block_ratio scales the stochastic-depth rate
        # linearly with depth.
        for idx, (k, exp, c, se, nl, s) in enumerate(mobile_setting):
            output_channel = make_divisible(c * width_mult)
            exp_channel = make_divisible(exp * width_mult)
            self.features.append(MobileBottleneck(input_channel, output_channel, k, s, exp_channel, se, nl, stochastic=stochastic, block_ratio=float(idx) / float(blocknum)))
            input_channel = output_channel
        # building last several layers
        last_conv = make_divisible(self.last_exp * width_mult)
        self.features.append(conv_1x1_bn(input_channel, last_conv, nlin_layer=Hswish))
        # self.features.append(SEModule(last_conv))  # refer to paper Table2, but I think this is a mistake
        self.features.append(nn.AdaptiveAvgPool2d(1))
        self.features.append(nn.Conv2d(last_conv, last_channel, 1, 1, 0))
        self.features.append(Hswish(inplace=True))
        # make it nn.Sequential
        self.features = nn.Sequential(*self.features)
        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(p=dropout), # refer to paper section 6
            nn.Linear(last_channel, n_class),
        )
        self._initialize_weights()

    def forward(self, x):
        """Return (batch, n_class) logits for a (batch, 1, H, W) input."""
        x = self.features(x)
        # Collapse any remaining spatial extent before the linear classifier.
        x = x.mean(3).mean(2)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        # weight initialization: He for convs, unit BN, small-normal linears.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
def mobilenetv3(pretrained=False, **kwargs):
    """Factory for MobileNetV3.

    Keyword arguments are forwarded to the MobileNetV3 constructor
    (n_class, blocknum, dropout, stochastic, ...).  With ``pretrained=True``
    the small-224 checkpoint is loaded from the working directory.
    """
    net = MobileNetV3(**kwargs)
    if pretrained:
        state = torch.load('mobilenetv3_small_67.4.pth.tar')
        net.load_state_dict(state, strict=True)
        # raise NotImplementedError
    return net
### EFFICIENT NET ###
from utils import (
round_filters,
round_repeats,
drop_connect,
get_same_padding_conv2d,
get_model_params,
efficientnet_params,
load_pretrained_weights,
Swish,
MemoryEfficientSwish,
)
class MBConvBlock(nn.Module):
    """
    Mobile Inverted Residual Bottleneck Block
    Args:
        block_args (namedtuple): BlockArgs, see above
        global_params (namedtuple): GlobalParam, see above
    Attributes:
        has_se (bool): Whether the block contains a Squeeze and Excitation layer.
    """

    def __init__(self, block_args, global_params):
        super().__init__()
        self._block_args = block_args
        self._bn_mom = 1 - global_params.batch_norm_momentum
        self._bn_eps = global_params.batch_norm_epsilon
        # SE is enabled only for a valid ratio in (0, 1].
        self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1)
        self.id_skip = block_args.id_skip # skip connection and drop connect
        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
        # Expansion phase: 1x1 conv widens channels by expand_ratio (skipped when 1).
        inp = self._block_args.input_filters # number of input channels
        oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels
        if self._block_args.expand_ratio != 1:
            self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False)
            self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        # Depthwise convolution phase
        k = self._block_args.kernel_size
        s = self._block_args.stride
        self._depthwise_conv = Conv2d(
            in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise
            kernel_size=k, stride=s, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps)
        # Squeeze and Excitation layer, if desired
        if self.has_se:
            num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio))
            self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1)
            self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1)
        # Output phase: 1x1 linear projection back down to output_filters.
        final_oup = self._block_args.output_filters
        self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False)
        self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps)
        self._swish = MemoryEfficientSwish()

    def forward(self, inputs, drop_connect_rate=None):
        """
        :param inputs: input tensor
        :param drop_connect_rate: drop connect rate (float, between 0 and 1)
        :return: output of block
        """
        # Expansion and Depthwise Convolution
        x = inputs
        if self._block_args.expand_ratio != 1:
            x = self._swish(self._bn0(self._expand_conv(inputs)))
        x = self._swish(self._bn1(self._depthwise_conv(x)))
        # Squeeze and Excitation
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_expand(self._swish(self._se_reduce(x_squeezed)))
            x = torch.sigmoid(x_squeezed) * x
        x = self._bn2(self._project_conv(x))
        # Skip connection and drop connect: only when the block preserves
        # both resolution (stride 1) and channel count.
        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
            if drop_connect_rate:
                x = drop_connect(x, p=drop_connect_rate, training=self.training)
            x = x + inputs # skip connection
        return x

    def set_swish(self, memory_efficient=True):
        """Sets swish function as memory efficient (for training) or standard (for export)"""
        self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
class EfficientNet(nn.Module):
    """
    An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods
    Args:
        blocks_args (list): A list of BlockArgs to construct blocks
        global_params (namedtuple): A set of GlobalParams shared between blocks
    Example:
        model = EfficientNet.from_pretrained('efficientnet-b0')
    """

    def __init__(self, blocks_args=None, global_params=None):
        super().__init__()
        assert isinstance(blocks_args, list), 'blocks_args should be a list'
        assert len(blocks_args) > 0, 'block args must be greater than 0'
        self._global_params = global_params
        self._blocks_args = blocks_args
        # Get static or dynamic convolution depending on image size
        Conv2d = get_same_padding_conv2d(image_size=global_params.image_size)
        # Batch norm parameters
        bn_mom = 1 - self._global_params.batch_norm_momentum
        bn_eps = self._global_params.batch_norm_epsilon
        # Stem: modified here to take single-channel (grayscale) input
        # instead of the usual 3-channel RGB.
        in_channels = 1
        out_channels = round_filters(32, self._global_params) # number of output channels
        self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
        self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
        # Build blocks
        self._blocks = nn.ModuleList([])
        for block_args in self._blocks_args:
            # Update block input and output filters based on depth multiplier.
            block_args = block_args._replace(
                input_filters=round_filters(block_args.input_filters, self._global_params),
                output_filters=round_filters(block_args.output_filters, self._global_params),
                num_repeat=round_repeats(block_args.num_repeat, self._global_params)
            )
            # The first block needs to take care of stride and filter size increase.
            self._blocks.append(MBConvBlock(block_args, self._global_params))
            if block_args.num_repeat > 1:
                block_args = block_args._replace(input_filters=block_args.output_filters, stride=1)
            for _ in range(block_args.num_repeat - 1):
                self._blocks.append(MBConvBlock(block_args, self._global_params))
        # Head
        in_channels = block_args.output_filters # output of final block
        out_channels = round_filters(1280, self._global_params)
        self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps)
        # Final linear layer
        self._avg_pooling = nn.AdaptiveAvgPool2d(1)
        self._dropout = nn.Dropout(self._global_params.dropout_rate)
        self._fc = nn.Linear(out_channels, self._global_params.num_classes)
        self._swish = MemoryEfficientSwish()

    def set_swish(self, memory_efficient=True):
        """Sets swish function as memory efficient (for training) or standard (for export)"""
        self._swish = MemoryEfficientSwish() if memory_efficient else Swish()
        for block in self._blocks:
            block.set_swish(memory_efficient)

    def extract_features(self, inputs):
        """ Returns output of the final convolution layer """
        # Stem
        x = self._swish(self._bn0(self._conv_stem(inputs)))
        # Blocks: drop-connect rate grows linearly with block depth.
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)
        # Head
        x = self._swish(self._bn1(self._conv_head(x)))
        return x

    def forward(self, inputs):
        """ Calls extract_features to extract features, applies final linear layer, and returns logits. """
        bs = inputs.size(0)
        # Convolution layers
        x = self.extract_features(inputs)
        # Pooling and final linear layer
        x = self._avg_pooling(x)
        x = x.view(bs, -1)
        x = self._dropout(x)
        x = self._fc(x)
        return x

    @classmethod
    def from_name(cls, model_name, override_params=None):
        """Build an uninitialised model from a name like 'efficientnet-b0'."""
        cls._check_model_name_is_valid(model_name)
        blocks_args, global_params = get_model_params(model_name, override_params)
        return cls(blocks_args, global_params)

    @classmethod
    def from_pretrained(cls, model_name, advprop=False, num_classes=1000, in_channels=3):
        """Build and load pretrained weights; optionally swap the stem for a
        different input channel count after loading."""
        model = cls.from_name(model_name, override_params={'num_classes': num_classes})
        load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000), advprop=advprop)
        if in_channels != 3:
            Conv2d = get_same_padding_conv2d(image_size = model._global_params.image_size)
            out_channels = round_filters(32, model._global_params)
            model._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False)
        return model

    @classmethod
    def get_image_size(cls, model_name):
        """Return the native input resolution for the named variant."""
        cls._check_model_name_is_valid(model_name)
        _, _, res, _ = efficientnet_params(model_name)
        return res

    @classmethod
    def _check_model_name_is_valid(cls, model_name):
        """ Validates model name. """
        valid_models = ['efficientnet-b'+str(i) for i in range(9)]
        if model_name not in valid_models:
            raise ValueError('model_name should be one of: ' + ', '.join(valid_models))
class AutoEncoder(nn.Module):
    """Six-stage convolutional autoencoder for single-channel images.

    Each encoder stage is (conv, conv, maxpool[, dropout]) and halves the
    spatial size; each decoder stage is (upsample, conv, conv[, dropout]) and
    doubles it back, so input H/W must be divisible by 2**6.
    """

    def __init__(self, input_channel=1):
        super(AutoEncoder, self).__init__()
        self.input_channel = input_channel
        self.encoder, self.decoder = self.make_encoder_layers()

    def make_encoder_layers(self, input_channel=1, layer_num=6):
        """Build the symmetric encoder/decoder stacks as an nn.Sequential pair."""
        encoder_output_channels = [64, 56, 48, 32, 24, 20]
        decoder_output_channels = [24, 32, 48, 56, 64, 1]
        encoder = []
        decoder = []
        # encoder: conv, conv, pool (+dropout between stages)
        for i in range(layer_num):
            encoder.append(conv_block(input_channel, encoder_output_channels[i], 1, nlin_layer=nn.LeakyReLU))
            encoder.append(conv_block(encoder_output_channels[i], encoder_output_channels[i], 1, nlin_layer=nn.LeakyReLU))
            encoder.append(nn.MaxPool2d(2, 2))
            if i != layer_num - 1:
                encoder.append(nn.Dropout2d(p=0.3))
            input_channel = encoder_output_channels[i]
        # decoder: upsample, conv, conv (+dropout between stages)
        for i in range(layer_num):
            decoder.append(nn.Upsample(scale_factor=2))
            decoder.append(conv_block(input_channel, input_channel, 1, nlin_layer=nn.LeakyReLU))
            decoder.append(conv_block(input_channel, decoder_output_channels[i], 1, nlin_layer=nn.LeakyReLU))
            if i != layer_num - 1:
                decoder.append(nn.Dropout2d(p=0.3))
            input_channel = decoder_output_channels[i]
        return nn.Sequential(*encoder), nn.Sequential(*decoder)

    def forward(self, x):
        # BUG FIX: removed the leftover debug print of the bottleneck shape,
        # which spammed stdout on every forward pass.
        return self.decoder(self.encoder(x))
class AutoEncoder_s(nn.Module):
    """Shallow three-stage variant of AutoEncoder (input H/W divisible by 8).

    BUG FIX: the constructor called ``super(AutoEncoder, self).__init__()`` —
    the wrong class.  Since AutoEncoder_s derives from nn.Module, not
    AutoEncoder, that call raised TypeError on every construction; it now
    names AutoEncoder_s.  The per-forward debug print was also removed.
    """

    def __init__(self, input_channel=1):
        super(AutoEncoder_s, self).__init__()
        self.input_channel = input_channel
        self.encoder, self.decoder = self.make_encoder_layers()

    def make_encoder_layers(self, input_channel=1, layer_num=3):
        """Build the symmetric 3-stage encoder/decoder stacks."""
        encoder_output_channels = [64, 56, 48]
        decoder_output_channels = [56, 64, 1]
        encoder = []
        decoder = []
        # encoder: conv, conv, pool (+dropout between stages)
        for i in range(layer_num):
            encoder.append(conv_block(input_channel, encoder_output_channels[i], 1, nlin_layer=nn.LeakyReLU))
            encoder.append(conv_block(encoder_output_channels[i], encoder_output_channels[i], 1, nlin_layer=nn.LeakyReLU))
            encoder.append(nn.MaxPool2d(2, 2))
            if i != layer_num - 1:
                encoder.append(nn.Dropout2d(p=0.3))
            input_channel = encoder_output_channels[i]
        # decoder: upsample, conv, conv (+dropout between stages)
        for i in range(layer_num):
            decoder.append(nn.Upsample(scale_factor=2))
            decoder.append(conv_block(input_channel, input_channel, 1, nlin_layer=nn.LeakyReLU))
            decoder.append(conv_block(input_channel, decoder_output_channels[i], 1, nlin_layer=nn.LeakyReLU))
            if i != layer_num - 1:
                decoder.append(nn.Dropout2d(p=0.3))
            input_channel = decoder_output_channels[i]
        return nn.Sequential(*encoder), nn.Sequential(*decoder)

    def forward(self, x):
        return self.decoder(self.encoder(x))
class pytorch_autoencoder(nn.Module):
    """Small fixed autoencoder: strided-conv encoder (8x downsample) and a
    transpose-conv decoder (8x upsample) ending in tanh."""

    def __init__(self):
        super(pytorch_autoencoder, self).__init__()
        encoder_layers = [
            nn.Conv2d(1, 32, 2, stride=2, padding=0),
            nn.ReLU(True),
            nn.MaxPool2d(2, stride=2),
            nn.Conv2d(32, 16, 2, stride=2, padding=0),
            nn.ReLU(True),
        ]
        decoder_layers = [
            nn.ConvTranspose2d(16, 32, 2, stride=2),
            nn.ReLU(True),
            nn.ConvTranspose2d(32, 64, 2, stride=2, padding=0),
            nn.ReLU(True),
            nn.ConvTranspose2d(64, 1, 2, stride=2, padding=0),
            nn.Tanh(),
        ]
        self.encoder = nn.Sequential(*encoder_layers)
        self.decoder = nn.Sequential(*decoder_layers)

    def forward(self, x):
        latent = self.encoder(x)
        return self.decoder(latent)
\ No newline at end of file
adabound==0.0.5
altgraph==0.17
click==7.1.1
cycler==0.10.0
future==0.18.2
kiwisolver==1.1.0
lxml==4.5.0
macholib==1.14
matplotlib==2.2.4
numpy==1.18.1
opencv-python==4.2.0.32
pandas==1.0.3
pefile==2019.4.18
Pillow==6.2.2
protobuf==3.11.3
PyInstaller==3.6
pyparsing==2.4.6
PyQt5==5.14.1
PyQt5-sip==12.7.1
python-dateutil==2.8.1
pytz==2019.3
pywin32-ctypes==0.2.0
PyYAML==5.3.1
scipy==1.4.1
six==1.14.0
tensorboard-logger==0.1.0
torch==1.4.0+cpu
torchvision==0.2.2.post3
tqdm==4.44.1
import argparse
import csv
import logging
import os
import shutil
import time
import sys
import zipfile
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
import PIL
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torch.nn.functional as F
import cv2
import matplotlib.pyplot as plt
import pandas as pd
from get_mean_std import get_params
sys.path.append(os.path.join(os.path.dirname(__name__)))
from model import mobilenetv3, EfficientNet
from torchvision.utils import save_image
from focal_loss import FocalLoss
from visualize.grad_cam import make_grad_cam
from utils import accuracy, AverageMeter, get_args_from_yaml, MyImageFolder, printlog, FastDataLoader
from PIL import Image
import torchvision.transforms.functional as TF
#from utils import restapi, preprocessing
# NOTE(review): a module-level `global` statement is a no-op — names assigned
# at module scope are already global.  Kept byte-for-byte regardless.
global error_case_idx, correct_case_idx
# Module logger; INFO and above to stderr.
# NOTE(review): os.path.dirname(__name__) yields '' for a top-level module —
# presumably logging.getLogger(__name__) was intended; confirm before changing.
logger = logging.getLogger(os.path.dirname(__name__))
logger.setLevel(logging.INFO)
streamHandler = logging.StreamHandler()
logger.addHandler(streamHandler)
def make_type_dir():
    """Create the test_result/Type/<class> output folders.

    Uses makedirs(exist_ok=True), which replaces the exists/mkdir pairs and is
    idempotent.  BUG FIX: 'Pollute' is listed in get_savepath_classes_args for
    mode 'Type' but its folder was never created, so cv2.imwrite into it would
    silently fail; it is created now.
    """
    for cls in ('Double', 'Flip', 'Scratch', 'Leave', 'Empty', 'Crack', 'Pollute'):
        os.makedirs(os.path.join('test_result', 'Type', cls), exist_ok=True)
def make_all_dir():
    """Create the test_result/All output tree (one folder per class).

    Idempotent. Fixes two defects: the original checked/created 'Normal'
    twice, and never created the 'Pollute' folder even though the All
    task's class list includes it (cv2.imwrite silently drops writes into
    a missing directory).
    """
    for name in ('Crack', 'Double', 'Empty', 'Flip', 'Leave', 'Normal', 'Pollute', 'Scratch'):
        os.makedirs(os.path.join('test_result', 'All', name), exist_ok=True)
def make_error_dir():
    """Create the test_result/Error output tree (Error / Normal folders).

    Idempotent; makedirs(exist_ok=True) replaces the exists()/mkdir()
    pairs and creates the intermediate 'test_result' level too.
    """
    for name in ('Error', 'Normal'):
        os.makedirs(os.path.join('test_result', 'Error', name), exist_ok=True)
def get_savepath_classes_args(mode):
    """Return (save_path, class names, yaml config) for the given task mode.

    mode 'Error' and 'Type' select their dedicated configs; any other
    value falls back to the All task.
    """
    if mode == "Error":
        return ('./test_result/Error',
                ['Error', 'Normal'],
                get_args_from_yaml("configs/Error_config.yml"))
    if mode == "Type":
        return ('./test_result/Type',
                ['Crack', 'Double', 'Empty', 'Flip', 'Leave','Pollute', 'Scratch'],
                get_args_from_yaml('configs/ErrorType_config.yml'))
    return ('./test_result/All',
            ['Crack','Double', 'Empty', 'Flip', 'Leave', 'Normal','Pollute', 'Scratch'],
            get_args_from_yaml('configs/All_config.yml'))
# 여러개의 인풋을 Test 수행할 때 사용되는 함수.
def test(testloader, model, mode):
    """Run inference over a loader and save each image under its top-1 class.

    testloader yields ((image_batch, _), (paths, _)); every image is
    written into <save_path>/<top1 class>/ with the top-2 classes and
    their softmax scores encoded in the filename.
    """
    with torch.no_grad():
        save_path, classes, _ = get_savepath_classes_args(mode)
        model.eval()
        for data in testloader:
            (batch, _), (paths, _) = data
            if torch.cuda.is_available():
                batch = batch.cuda()
            probs = F.softmax(model(batch), dim=1)
            for i, p in enumerate(probs):
                top = torch.topk(p, 2)
                vals = top.values.tolist()
                idxs = top.indices.tolist()
                img = cv2.imread(paths[i])
                cv2.imwrite(f"{save_path}/{classes[idxs[0]]}/{classes[idxs[0]]}={vals[0]}__{classes[idxs[1]]}={vals[1]}.bmp", img)
# Test input이 하나의 파일일 때 사용되는 함수.
# Path = 데이터 원본의 경로, mode = 수행하는 Task.
def single_file_test(input, model, path, mode, q):
    """Classify one preprocessed image tensor and save it under its top-1 class.

    input: (1, C, H, W) tensor; path: the original image location on disk;
    mode: task name (Error / Type / All); q: UI queue for progress strings.
    """
    with torch.no_grad():
        save_path, classes, _ = get_savepath_classes_args(mode)
        model.eval()
        if torch.cuda.is_available():
            input = input.cuda()
        t0 = time.time()
        prob = F.softmax(model(input), dim=1)
        q.put(f"Inference time 1 image : {str(round(time.time() - t0 , 5))}")
        for k, p in enumerate(prob):
            top = torch.topk(p, 2)
            vals = top.values.tolist()   # top-2 probabilities, descending
            idxs = top.indices.tolist()  # matching class indices
            img = cv2.imread(path)
            cv2.imwrite(f"{save_path}/{classes[idxs[0]]}/{classes[idxs[0]]}={vals[0]}__{classes[idxs[1]]}={vals[1]}.bmp", img)
# 유저가 지정해준 checkpoint가 없으면 config 에 있는 checkpoint를 사용.
# data는 config에 지정된 data를 활용.
def UI_validate(mode, q, **kwargs):
    """Validate a user-supplied checkpoint from the UI.

    kwargs carry blocknum, data_path and ck_path; progress and error
    strings are pushed onto q, exceptions are logged.
    """
    try:
        _, _, args = get_savepath_classes_args(mode)
        args['model']['blocks'] = kwargs['blocknum']
        args['data']['val'] = kwargs["data_path"]
        q.put(f"using user's checkpoint {kwargs['ck_path']}")
        args['checkpoint'] = kwargs['ck_path']
        # Tag the run with a timestamped id so its log file is unique.
        args['id'] = "validate_" + time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        main(args, q)
    except Exception as ex:
        q.put(f"실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.")
        logger.info(ex)
# test는 항상 유저가 지정해주는 data를 활용.
# 만약 없다면 demo 즉 UI단에서 지정한 default data를 활용.
# mode: Error, ErrorType, All
# path: data path
# test_mode: File or dir
def UI_test(mode, path, test_mode, q, **kwargs):
    """Run inference from the UI on a single file or a directory of images.

    mode: task name (Error / Type / All); path: data location;
    test_mode: 'dir' for a folder of images, anything else for one file;
    q: UI queue receiving progress strings; kwargs: blocknum, size, ck_path.
    """
    try:
        _, _, args = get_savepath_classes_args(mode)
        # Make sure every result folder exists before images are written.
        make_error_dir()
        make_type_dir()
        make_all_dir()
        args['model']['blocks'] = kwargs['blocknum']
        args['train']['size'] = kwargs['size']
        q.put(f"using user's checkpoint {kwargs['ck_path']}")
        args['checkpoint'] = kwargs['ck_path']
        timestring = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        args['id'] = "test_" + timestring
        gpus = args['gpu']
        resize_size = args['train']['size']
        model = mobilenetv3(n_class=args['model']['class'], blocknum=args['model']['blocks'])
        if torch.cuda.is_available():
            torch.cuda.set_device(gpus[0])
            with torch.cuda.device(gpus[0]):
                model = model.cuda()
            model = torch.nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])
        else:
            model = torch.nn.DataParallel(model)
            device = torch.device("cpu")
            model.to(device)
        q.put("loading checkpoint...")
        if torch.cuda.is_available():
            checkpoint = torch.load(args['checkpoint'])
        else:
            # Remap GPU-saved tensors onto the CPU when no GPU is present.
            checkpoint = torch.load(args['checkpoint'],map_location=torch.device('cpu'))
        model.load_state_dict(checkpoint['state_dict'])
        q.put("checkpoint already loaded!")
        q.put("start test")
        # Directory input: many images, so go through a DataLoader.
        # MyImageFolder also yields each sample's path, which is needed to
        # save the classified copies.
        if test_mode == 'dir':
            normalize = transforms.Normalize(mean=[0.4015], std=[0.2165])
            transform_test = transforms.Compose([
                transforms.Resize((resize_size,resize_size)),
                transforms.Grayscale(),
                transforms.ToTensor(),
                normalize
            ])
            q.put(f"data path directory is {path}")
            testset = MyImageFolder(path, transform=transform_test)
            test_loader = FastDataLoader(testset, batch_size=args['predict']['batch-size'], shuffle=False, num_workers=8)
            start = time.time()
            test(test_loader, model, mode)
            q.put(f"Inference time {len(testset)} images : {str(round(time.time() - start , 5))}")
        # Single-file input: convert the image to a tensor directly with
        # transforms.functional instead of a DataLoader; the calls mirror
        # the transforms.Compose pipeline above, applied in the same order.
        else:
            image = Image.open(path)
            x = TF.resize(image, (resize_size,resize_size))  # resize
            x = TF.to_grayscale(x)  # grayscale
            x = TF.to_tensor(x)  # tensor conversion
            x.unsqueeze_(0)  # add batch dimension at dim 0
            start = time.time()
            single_file_test(x, model, path, mode, q)
            q.put(f"Inference time 1 image : {str(round(time.time() - start , 5))}")
        q.put('finish test')
    except Exception as ex:
        q.put("실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.")
        logger.info(ex)
def UI_temp(path, q, model):
    """Quick single-image Error/Normal check used by the UI demo.

    Loads the image at `path`, preprocesses it to a 64x64 grayscale
    tensor and delegates to single_file_test with the 'Error' task.
    """
    try:
        side = 64  # the demo model expects a fixed 64x64 input
        tensor = TF.to_tensor(TF.to_grayscale(TF.resize(Image.open(path), (side, side))))
        tensor.unsqueeze_(0)  # add the batch dimension
        single_file_test(tensor, model, path, "Error", q)
        q.put('temp test finish')
    except Exception as ex:
        q.put("실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.")
        logger.info(ex)
def UI_temp2():
    """Benchmark helper: print the average per-batch load time.

    Iterates the whole ../data/Fifth_data/All dataset once through a
    FastDataLoader and prints elapsed seconds divided by batch count.
    """
    batch_size = 256
    pipeline = transforms.Compose([
        transforms.Resize((256,256)),
        transforms.ToTensor()
    ])
    dataset = datasets.ImageFolder("../data/Fifth_data/All", pipeline)
    loader = FastDataLoader(dataset=dataset,
                            batch_size=batch_size, shuffle=True)
    n_batches = 0
    t0 = time.time()
    for _ in loader:
        n_batches += 1
    print((time.time() - t0) / n_batches)
def main(args, q=None):
    """Attach a per-run log file, run the evaluation, and report completion.

    args: config dict (uses 'task' and 'id'); q: optional UI queue that
    receives the progress/error strings via printlog.
    """
    try:
        logdir = f'logs/runs/{args["task"]}/'
        os.makedirs(logdir, exist_ok=True)
        logger.addHandler(logging.FileHandler(logdir + f'{args["id"]}.log'))
        # 2. eval
        run_model(args, q)
        # 3. Done
        printlog(f"[{args['id']}] done", logger, q)
    except Exception as ex:
        printlog("실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요.", logger, q)
        logger.info(ex)
def run_model(args, q=None):
    """Build the validation loader and model from args, then evaluate.

    args: config dict (train/data/predict/model/checkpoint keys);
    q: optional UI queue forwarded down to validate().
    """
    resize_size = args['train']['size']
    gpus = args['gpu']
    # Normalisation statistics are computed from the validation data itself.
    mean, std = get_params(args['data']['val'], resize_size)
    normalize = transforms.Normalize(mean=[mean[0].item()],
                                     std=[std[0].item()])
    normalize_factor = [mean, std]
    # data loader
    transform_test = transforms.Compose([
        transforms.Resize((resize_size,resize_size)),
        transforms.Grayscale(),
        transforms.ToTensor(),
        normalize
    ])
    kwargs = {'num_workers': args['predict']['worker'], 'pin_memory': True}
    test_data = MyImageFolder(args['data']['val'], transform_test)
    # Same seed and ratio as training, so the shuffled 10% split selects
    # the identical held-out samples here.
    random_seed = 10
    validation_ratio = 0.1
    num_test = len(test_data)
    indices = list(range(num_test))
    split = int(np.floor(validation_ratio * num_test))
    np.random.seed(random_seed)
    np.random.shuffle(indices)
    valid_idx = indices[:split]
    valid_sampler = SubsetRandomSampler(valid_idx)
    val_loader = FastDataLoader(
        test_data, batch_size=args['predict']['batch-size'], sampler=valid_sampler,
        **kwargs)
    criterion = nn.CrossEntropyLoss()
    # load model
    model = mobilenetv3(n_class=args['model']['class'], blocknum=args['model']['blocks'])
    # get the number of model parameters
    logger.info('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    if torch.cuda.is_available():
        torch.cuda.set_device(gpus[0])
        with torch.cuda.device(gpus[0]):
            model = model.cuda()
            criterion = criterion.cuda()
        model = torch.nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])
    else:
        model = torch.nn.DataParallel(model)
        device = torch.device("cpu")
        model.to(device)
        criterion.to(device)
    logger.info("=> loading checkpoint '{}'".format(args['checkpoint']))
    if torch.cuda.is_available():
        checkpoint = torch.load(args['checkpoint'])
    else:
        # Remap GPU-saved tensors onto the CPU when no GPU is present.
        checkpoint = torch.load(args['checkpoint'], map_location=torch.device('cpu'))
    args['start_epoch'] = checkpoint['epoch']
    best_prec1 = checkpoint['best_prec1']
    model.load_state_dict(checkpoint['state_dict'])
    logger.info("=> loaded checkpoint '{}' (epoch {})"
                .format(args['checkpoint'], checkpoint['epoch']))
    cudnn.benchmark = True
    # define loss function (option 2)
    #criterion = FocalLoss(
    #    gamma=args['loss']['gamma'], alpha=args['loss']['alpha']).cuda()
    # evaluate on validation set
    prec1 = validate(val_loader, model, criterion, normalize_factor, args ,q)
    # remember best prec@1 and save checkpoint
    best_prec1 = max(prec1, best_prec1)
    logger.info(f'Best accuracy: {best_prec1}')
def validate(val_loader, model, criterion, normalize_factor, args, q):
    """Perform validation on the validation set.

    Returns the average top-1 accuracy; optionally dumps misclassified
    images and their Grad-CAM heatmaps under eval_results/<task>/.
    """
    with torch.no_grad():
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        # switch to evaluate mode
        model.eval()
        end = time.time()
        for i, data in enumerate(val_loader):
            # MyImageFolder yields ((image, label), (path, class_idx)).
            (input, target), (path, _) = data
            if torch.cuda.is_available():
                target = target.cuda()
                input = input.cuda()
            # compute output
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            prec1 = accuracy(output.data, target, topk=(1,))[0]
            # Save error cases; save_correct=False keeps only the errors.
            if args['predict']['save']:
                save_error_case(output.data, target, path, args, topk=(1,), input=input, save_correct=False)
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % 1 == 0:  # i % 1 is always 0: logs every batch
                logger.info('Test: [{0}/{1}]\t'
                            'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                            'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                            'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                                i, len(val_loader), batch_time=batch_time, loss=losses,
                                top1=top1))
        printlog(' * Prec@1 {top1.avg:.3f}'.format(top1=top1), logger, q)
        # Optionally render Grad-CAM heatmaps for the saved error cases.
        if args["predict"]["cam"]:
            logger.info("Creating CAM")
            if args['predict']['normalize']:
                make_grad_cam(f"eval_results/{args['task']}/error_case",
                              f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor, cam_class=args['predict']['cam-class'], args=args)
            else:
                make_grad_cam(f"eval_results/{args['task']}/error_case",
                              f"eval_results/{args['task']}/error_case/cam" , model, normalize_factor=None, cam_class=args['predict']['cam-class'], args = args)
        return top1.avg
def save_error_case(output, target, path, args , topk=(1,), input=None, save_correct=False):
    """Save misclassified (and optionally correct) images under eval_results/<task>/.

    Filenames encode the true label and the top-2 predicted classes with
    their softmax confidences (percent) for visual triage.

    output: raw logits (batch, classes); target: true labels (batch,);
    path: per-sample source image paths; args: config dict (uses 'task');
    save_correct: also save correctly classified images.
    """
    global error_case_idx, correct_case_idx
    # Fix: the counters were reset to 0 on every call, defeating the
    # globals and letting later batches collide with earlier filenames.
    # Initialise them once and keep counting across calls.
    if 'error_case_idx' not in globals():
        error_case_idx = 0
    if 'correct_case_idx' not in globals():
        correct_case_idx = 0
    _, class_arr, _ = get_savepath_classes_args(args['task'])
    p = F.softmax(output, dim=1)
    values = torch.topk(p,2).values.tolist()
    indices = torch.topk(p,2).indices.tolist()
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    pred = pred.view(batch_size)
    correct = correct.view(batch_size)
    # makedirs(exist_ok=True) collapses the original exists()/mkdir chain.
    os.makedirs(f"eval_results/{args['task']}/error_case", exist_ok=True)
    if save_correct:
        os.makedirs(f"eval_results/{args['task']}/correct_case", exist_ok=True)
    for idx, correct_element in enumerate(correct):
        # misclassified sample
        if correct_element.item() == 0:
            img = cv2.imread(path[idx])
            cv2.imwrite(f"eval_results/{args['task']}/error_case/idx_{error_case_idx}_label_{class_arr[target[idx]]}_pred_{class_arr[indices[idx][0]]}={round(values[idx][0]*100,1)}_{class_arr[indices[idx][1]]}={round(values[idx][1]*100,1)}_real.bmp" ,img)
            error_case_idx = error_case_idx + 1
        # correctly classified sample (only when requested)
        if save_correct and correct_element.item() == 1:
            img = cv2.imread(path[idx])
            cv2.imwrite(f"eval_results/{args['task']}/correct_case/idx_{correct_case_idx}_label_{class_arr[target[idx]]}_real.bmp" ,img)
            correct_case_idx = correct_case_idx + 1
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", default="configs/Error_config.yml", help="train config file")  # task selector
    args = parser.parse_args()
    # NOTE(review): --config is compared against the short names 'Error' /
    # 'Type', but its default is a file *path*, so a default run falls
    # through to All_config.yml — confirm this is intended.
    if args.config == 'Error':
        args = get_args_from_yaml("configs/Error_config.yml")
    elif args.config == 'Type':
        args = get_args_from_yaml('configs/ErrorType_config.yml')
    else:
        args = get_args_from_yaml('configs/All_config.yml')
    args['id'] = 'eval'
    main(args)
import argparse
import datetime
import logging
import os
import shutil
import time
import numpy as np
import PIL
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.parallel
import torch.optim
import adabound
import torch.utils.data
from torch.utils.data.sampler import SubsetRandomSampler
import torchvision.models as models
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torchvision.utils import save_image
from PIL.ImageOps import grayscale
from PIL import Image
from get_mean_std import get_params
import random
from tensorboard_logger import configure, log_value
from model import mobilenetv3, EfficientNet
from focal_loss import FocalLoss
from utils import get_args_from_yaml, accuracy, precision, recall, AverageMeter, printlog, FastDataLoader
import threading
# Module logger; handlers (file, optional console) are attached in main().
logger = logging.getLogger('Techwing_log_file')
logger.setLevel(logging.INFO)
# Console stream for the logger (uncomment to also print to the console).
#streamHandler = logging.StreamHandler()
#logger.addHandler(streamHandler)
# Best validation top-1 accuracy so far; updated inside run_model.
best_prec1 = 0
print_dataset_statestics = False
# Train/validation split ratio and the seed that fixes the split.
validation_ratio = 0.1
random_seed = 10
# Per-process seed used for weight initialisation (torch.manual_seed).
seed = random.randint(1,1000)
# UI 단에서 실행하는 Train 방식.
def UI_train(mode, q, **kwargs):
    """Launch training from the UI with user-chosen hyperparameters.

    mode picks the yaml config (Error / Type / anything-else = All);
    kwargs carry blocknum, data_path, epoch, optim, lr, batch_size, size,
    resume and ck_path. Progress and error strings are pushed onto q.
    """
    try:
        if mode == 'Error':
            config_file = "configs/Error_config.yml"
        elif mode == 'Type':
            config_file = 'configs/ErrorType_config.yml'
        else:
            config_file = 'configs/All_config.yml'
        args = get_args_from_yaml(config_file)
        # Override the yaml defaults with the user's UI selections.
        args['model']['blocks'] = kwargs['blocknum']
        args['data']['train'] = kwargs["data_path"]
        args['train']['epochs'] = kwargs["epoch"]
        args['optimizer']['type'] = kwargs["optim"]
        args['optimizer']['lr'] = kwargs["lr"]
        args['train']["batch-size"] = kwargs["batch_size"]
        args['predict']['batch-size'] = kwargs["batch_size"]
        args['train']['size'] = kwargs["size"]
        if kwargs['resume']:
            q.put(f"resume training with checkpoint : {kwargs['ck_path']}")
            args['train']['resume'] = kwargs['ck_path']
        # Timestamped id keeps each run's log file unique.
        args['id'] = "train_" + time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        main(args, q=q)
    except Exception as ex:
        q.put(f"실행 중 에러가 발생하였습니다. 자세한 사항은 보시려면 로그를 확인해 주세요")
        logger.info(ex)
# args: user hyperparameters, q: Queue of UI
def main(args, q=None):
    """Configure logging for this training run, then train.

    args: config dict from yaml; q: optional UI queue. When q is None the
    script was launched from the CLI, so logs are also echoed to console.
    """
    try:
        logdir = f"logs/{args['task']}/"
        if not os.path.exists(logdir):
            os.makedirs(logdir)
        if q == None:
            streamHandler = logging.StreamHandler()
            logger.addHandler(streamHandler)
        # Per-run log file named after the job id / model / block count.
        fileHandler = logging.FileHandler(logdir + f"{args['id']}_{args['modelname']}_block_{args['model']['blocks']}.log")
        logger.addHandler(fileHandler)
        timestring = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        logger.info(timestring)
        # Train & Validate
        run_model(args, q)
        timestring = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime(time.time()))
        logger.info(timestring)
        # Fix: was logging.info() on the root logger, so the completion
        # message never reached this run's file handler.
        logger.info(f"[{args['id']}] done")
    except Exception:
        logger.error("Fatal error in main loop", exc_info=True)
        # Fix: logger.warn is deprecated; warning() is the supported name.
        logger.warning(f"[{args['id']}] failed")
# args : Yaml에서 가져온 설정 파일, q : Dialog 에 입력하기 위하여 쓰인 Queue
def run_model(args, q=None):
    """Build loaders/model/optimizer from args and run the train/validate loop.

    args: config dict loaded from yaml; q: optional UI queue for progress.
    Returns the path of the best checkpoint file.
    """
    global best_prec1
    # Experiment tag (task=All..., id=random int, model=mobilenetv3,
    # epoch=1~3000, block=1~11, class=2~8).
    args['task'] = "%s/%s_model=%s-ep=%s-block=%s-class=%s" % (
        args['task'],
        args['id'],
        args['modelname'],
        args['train']['epochs'],
        args['model']['blocks'],
        args['model']['class'])
    logger.info(f"use seed {seed}")
    logger.info(f"use dataset : {args['data']['train']}")
    # get GPU information from configs file
    logger.info(args)
    gpus = args['gpu']
    resize_size = args['train']['size']
    ############################# data loading #############################
    # Normalisation statistics computed from the training data itself.
    mean, std = get_params(args['data']['train'], resize_size)
    normalize = transforms.Normalize(mean=[mean[0].item()],
                                     std=[std[0].item()])
    # Transforms applied, in order, to every training image.
    if args['train']['augment']:
        transform_train = transforms.Compose([
            transforms.Resize((resize_size, resize_size)),  # resize both sides
            transforms.ColorJitter(0.2,0.2,0.2),  # jitter brightness/contrast/saturation
            transforms.RandomRotation(2),  # rotate within -2..2 degrees
            transforms.RandomAffine(5),  # small random affine distortion
            transforms.RandomCrop(resize_size, padding=2),  # pad by 2, crop back to size
            transforms.RandomHorizontalFlip(),  # random left/right flip
            transforms.Grayscale(),
            transforms.ToTensor(),
            normalize
        ])
    # option not augment
    else:
        transform_train = transforms.Compose([
            transforms.Resize((resize_size, resize_size)),
            transforms.ToTensor(),
            normalize
        ])
    # Transforms for the validation loader.
    transform_test = transforms.Compose([
        transforms.Resize((resize_size, resize_size)),
        transforms.Grayscale(),
        transforms.ToTensor(),
        normalize
    ])
    # num_workers: loader worker count; pin_memory speeds host-to-GPU copies.
    kwargs = {'num_workers': args['train']['worker'], 'pin_memory': True}
    # ImageFolder: each sub-directory of the path becomes one class.
    train_data = datasets.ImageFolder(args['data']['train'], transform_train)
    val_data = datasets.ImageFolder(args['data']['train'],transform_test)
    # The whole dataset is listed as training data, so hold out
    # validation_ratio = 0.1 (10%) as the validation split.
    random_seed = 10
    validation_ratio = 0.1
    num_train = len(train_data)
    indices = list(range(num_train))
    split = int(np.floor(validation_ratio * num_train))
    # Fixed seed (10, for both train and eval) keeps the split identical.
    np.random.seed(random_seed)
    np.random.shuffle(indices)
    # Split into train / validation index sets.
    train_idx, valid_idx = indices[split:], indices[:split]
    train_sampler = SubsetRandomSampler(train_idx)
    valid_sampler = SubsetRandomSampler(valid_idx)
    train_loader = FastDataLoader(
        train_data, batch_size=args['train']['batch-size'], sampler=train_sampler, #shuffle = True
        **kwargs)
    val_loader = FastDataLoader(
        val_data, batch_size=args['train']['batch-size'], sampler=valid_sampler, #shuffle = False
        **kwargs)
    ############################## model setup #############################
    # Seed for the random convolution-weight initialisation.
    torch.manual_seed(seed)
    # Per-class weights let rare classes count more in the loss.
    class_weights = torch.FloatTensor(args['train']['weight'])
    criterion = nn.CrossEntropyLoss(weight=class_weights)
    # Backbone: MobileNet V3.
    model = mobilenetv3(n_class=args['model']['class'], blocknum=args['model']['blocks'], dropout=0.5)
    # SGD by default (a well-tuned SGD often beats Adam in practice).
    if args['optimizer']['type'] == "SGD":
        optimizer = torch.optim.SGD(model.parameters(), args['optimizer']['lr'],
                                    momentum=args['optimizer']['momentum'],
                                    nesterov=True,
                                    weight_decay=args['optimizer']['weight_decay'])
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, len(train_loader))
    else:
        optimizer = torch.optim.Adam(model.parameters(), args['optimizer']['lr'],
                                     weight_decay=args['optimizer']['weight_decay'])
        scheduler = None  # Adam adapts its own step size, so no scheduler.
    # get the number of model parameters
    logger.info('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    # for training on multiple GPUs.
    # Use CUDA_VISIBLE_DEVICES=0,1 to specify which GPUs to use
    if torch.cuda.is_available():
        torch.cuda.set_device(gpus[0])
        with torch.cuda.device(gpus[0]):
            model = model.cuda()
            criterion = criterion.cuda()
        # DataParallel gathers gradients on one device (usually GPU 0),
        # which therefore needs the most memory; output_device controls
        # where that gathering happens. Data here is small, so GPU 0 is fine.
        model = torch.nn.DataParallel(model, device_ids=gpus, output_device=gpus[0])
    else:
        model = torch.nn.DataParallel(model)
        device = torch.device("cpu")
        model.to(device)
        criterion.to(device)
    # Optionally resume training from a user-supplied checkpoint.
    if args['train']['resume']:
        # only if the checkpoint file actually exists
        if os.path.isfile(args['train']['resume']):
            logger.info(f"=> loading checkpoint '{args['train']['resume']}'")
            if torch.cuda.is_available():
                checkpoint = torch.load(args['train']['resume'])
            else:
                checkpoint = torch.load(args['train']['resume'], map_location=torch.device('cpu'))
            # Resume epoch counting from the checkpoint's epoch.
            args['train']['start-epoch'] = checkpoint['epoch']
            # Carry over the best accuracy recorded in the checkpoint.
            best_prec1 = checkpoint['best_prec1']
            # Restore the trained weights into the model declared above.
            model.load_state_dict(checkpoint['state_dict'])
            logger.info(f"=> loaded checkpoint '{args['train']['resume']}' (epoch {checkpoint['epoch']})")
        else:
            # Fix: was args.resume, which raises AttributeError because
            # args is a plain dict.
            logger.info(f"=> no checkpoint found at '{args['train']['resume']}'")
    # Let cudnn pick the fastest conv algorithms for these tensor shapes.
    cudnn.benchmark = True
    for epoch in range(args['train']['start-epoch'], args['train']['epochs']):
        # train for one epoch
        train(train_loader, model, criterion, optimizer, scheduler, epoch, args, q)
        # evaluate on validation set
        prec1, prec, rec = validate(val_loader, model, criterion, epoch, args, q)
        # remember best prec@1 and save checkpoint
        is_best = prec1 >= best_prec1
        best_prec1 = max(prec1, best_prec1)
        checkpoint = save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best, args, args['model']['blocks'], q)
        printlog(f'Best accuracy: {best_prec1}', logger, q)
        # Binary task logs one precision/recall pair; multi-class tasks log
        # one pair per class.
        if args['model']['class'] !=2:
            for i in range(len(prec)):
                logger.info(' * Precision {prec.avg:.3f}'.format(prec=prec[i]))
                logger.info(' * recall {rec.avg:.3f}'.format(rec=rec[i]))
        else:
            logger.info(' * Precision {prec.avg:.3f}'.format(prec=prec))
            logger.info(' * recall {rec.avg:.3f}'.format(rec=rec))
        #count = count + 1
    return checkpoint
# train_loader : 데이터 로더
# Model : MobilenetV3(default).
# criterion : Crossentropy (default).
# scheduler : CosineAnnealing.
# epoch : 3000.
# args : config parameters. yaml 파일에서 확인하실 수 있습니다.
# q : UI창에서 글을 쓰는 역활을 하는 Queue. (쓰레드에서 Queue에서 지속적으로 가지고와서 입력.)
def train(train_loader, model, criterion, optimizer, scheduler, epoch, args, q=None):
    """Train for one epoch on the training set"""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    prec = AverageMeter()
    # switch to train mode
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        if torch.cuda.is_available():
            target = target.cuda()
            input = input.cuda()
        # compute output
        output = model(input)
        loss = criterion(output, target)
        # measure accuracy and record loss
        prec1 = accuracy(output, target, topk=(1,))[0]
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if scheduler != None:
            # CosineAnnealingLR was created with T_max=len(train_loader),
            # so it steps once per batch, not per epoch.
            scheduler.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % args['etc']['print_freq'] == 0:
            # Binary (Error/Normal) task: include precision in each line.
            if args['model']['class'] == 2:
                logger.info('Epoch: [{0}][{1}/{2}]\t'
                            'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                            'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                            'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                            'Precision {prec.val:.3f} ({prec.avg:.3f})'
                            .format(
                                epoch, i, len(train_loader), batch_time=batch_time,
                                loss=losses, top1=top1, prec=prec))
            # Multi-class (All / ErrorType) task: per-class precision is
            # reported at the very end instead of in every progress line,
            # since 6+ classes would not fit one line readably.
            else:
                logger.info('Epoch: [{0}][{1}/{2}]\t'
                            'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                            'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                            'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                            .format(
                                epoch, i, len(train_loader), batch_time=batch_time,
                                loss=losses, top1=top1))
    # log to TensorBoard
    if args['etc']['tensorboard']:
        log_value('train_loss', losses.avg, epoch)
        log_value('train_acc', top1.avg, epoch)
def validate(val_loader, model, criterion, epoch, args, q=None):
    """Perform validation on the validation set.

    Returns (avg top-1 accuracy, precision meter(s), recall meter(s)):
    a single AverageMeter pair for the binary task, or one per class
    otherwise.
    """
    with torch.no_grad():
        batch_time = AverageMeter()
        losses = AverageMeter()
        top1 = AverageMeter()
        prec = []
        rec = []
        # Binary task keeps a single precision/recall meter; multi-class
        # tasks keep one meter per class.
        if args['model']['class'] == 2:
            prec = AverageMeter()
            rec = AverageMeter()
        else:
            for i in range(args['model']['class']):
                prec.append(AverageMeter())
                rec.append(AverageMeter())
        # switch to evaluate mode
        model.eval()
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if torch.cuda.is_available():
                target = target.cuda()
                input = input.cuda()
            # compute output
            output = model(input)
            loss = criterion(output, target)
            # measure accuracy and record loss
            prec1 = accuracy(output.data, target, topk=(1,))[0]
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            if args['model']['class'] == 2:
                prec.update(precision(output.data, target, target_class=0), input.size(0))
                rec.update(recall(output.data, target, target_class=0), input.size(0))
            else:
                for k in range(args['model']['class']):
                    prec[k].update(precision(output.data, target, target_class=k), input.size(0))
                    rec[k].update(recall(output.data, target, target_class=k), input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % args['etc']['print_freq'] == 0:
                if args['model']['class'] == 2:
                    logger.info('Test: [{0}/{1}]\t'
                                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                                'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                                'Precision {prec.val:.3f} ({prec.avg:.3f})'
                                .format(
                                    i, len(val_loader), batch_time=batch_time, loss=losses,
                                    top1=top1, prec=prec))
                else:
                    logger.info('Test: [{0}/{1}]\t'
                                'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                                'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                                'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'
                                .format(
                                    i, len(val_loader), batch_time=batch_time, loss=losses,
                                    top1=top1))
        printlog(' * epoch: {epoch} Prec@1 {top1.avg:.3f}'.format(epoch=epoch,top1=top1), logger, q)
        if args['model']['class'] == 2:
            logger.info(' * Precision {prec.avg:.3f}'.format(prec=prec))
            logger.info(' * recall {rec.avg:.3f}'.format(rec=rec))
        # log to TensorBoard
        if args['etc']['tensorboard']:
            log_value('val_loss', losses.avg, epoch)
            log_value('val_acc', top1.avg, epoch)
        return top1.avg, prec, rec
def save_checkpoint(state, is_best, args, block, q, filename='checkpoint.pth.tar'):
    """Save a training checkpoint; mirror it to model_best.pth.tar when best.

    state: dict with epoch / state_dict / best_prec1; is_best: whether
    this epoch achieved the best validation accuracy so far; block and q
    are unused here but kept for call-site compatibility.
    Returns the best-checkpoint path.
    """
    directory = "%s/%s/" % (args['output'], args['task'])
    if not os.path.exists(directory):
        os.makedirs(directory)
    filename = directory + filename
    torch.save(state, filename)
    # Fix: the message previously logged a literal "(unknown)" placeholder
    # instead of the actual saved path.
    logger.info(f"Checkpoint Saved: {filename}")
    best_filename = f"{args['output']}/{args['task']}/model_best.pth.tar"
    if is_best:
        shutil.copyfile(filename, best_filename)
        logger.info(f"New Best Checkpoint saved: {best_filename}")
    return best_filename
def save_error_case(output, target, epoch, topk=(1,), input=None):
    """Save misclassified input images as bmp files for later inspection.

    output: model logits (batch, classes); target: true labels (batch,);
    epoch is embedded in the filename together with the predicted and
    true class indices; input holds the corresponding image tensors.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    pred = pred.view(batch_size)
    correct = correct.view(batch_size)
    for idx, correct_element in enumerate(correct):
        # Fix: the original saved *every* sample — it iterated `correct`
        # but never tested it. Only misclassified samples belong in
        # error_case/ (matching the eval-side save_error_case).
        if correct_element.item() == 0:
            image = input[idx]
            save_image(image, f"error_case/epoch_{epoch}_idx_{idx}_case_{pred[idx]}_{target[idx]}.bmp")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Task selector: 'Error', 'ErrorType', or anything else for the All task.
    parser.add_argument("--config", required=True, help="train config file")
    args = parser.parse_args()
    if args.config == 'Error':
        args = get_args_from_yaml("configs/Error_config.yml")
    elif args.config == 'ErrorType':
        args = get_args_from_yaml('configs/ErrorType_config.yml')
    else:
        args = get_args_from_yaml('configs/All_config.yml')
    # job id: random integer used to tag log files and checkpoints
    args['id'] = str(random.randint(0,99999))
    main(args)
\ No newline at end of file
import yaml
import torchvision.datasets as datasets
import re
import math
import collections
from functools import partial
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils import model_zoo
class AverageMeter(object):
    """Tracks the latest value plus a running, count-weighted average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all statistics back to zero."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
# 기존 ImageFolder는 For문에 들어갈 때 튜플 (input, label)을 만들지만
# 해당 클래스는 (input, label, path) 까지 만들어 내도록 구성.
class MyImageFolder(datasets.ImageFolder):
    """ImageFolder variant whose items also carry the sample's file path.

    __getitem__ returns ((image, label), (path, class_index)) so callers
    can locate and save the original file for each prediction.
    """
    def __getitem__(self, index):
        # self.imgs[index] is the (path, class_index) pair ImageFolder recorded.
        return super(MyImageFolder, self).__getitem__(index), self.imgs[index]
# instead of BatchSampler
class _RepeatSampler(object):
""" Sampler that repeats forever.
Args:
sampler (Sampler)
"""
def __init__(self, sampler):
self.sampler = sampler
def __iter__(self):
while True:
yield from iter(self.sampler)
class FastDataLoader(torch.utils.data.dataloader.DataLoader):
    # DataLoader that builds its (single- or multi-process) iterator once
    # and reuses it, with the batch sampler wrapped so it never terminates
    # — avoiding per-epoch iterator/worker start-up cost.
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # object.__setattr__ bypasses DataLoader's attribute guard so the
        # already-built batch sampler can be swapped for the endless wrapper.
        object.__setattr__(self, 'batch_sampler', _RepeatSampler(self.batch_sampler))
        self.iterator = super().__iter__()  # created once: single- or multi-processing iterator
    def __len__(self):
        # Length of the *wrapped* batch sampler, i.e. batches per epoch.
        return len(self.batch_sampler.sampler)
    def __iter__(self):
        # One "epoch" = exactly len(self) batches drawn from the shared,
        # never-ending iterator.
        for i in range(len(self)):
            yield next(self.iterator)
# yml 파일 안에 적혀진 정보 받기.
def get_args_from_yaml(file):
    """Parse the yaml config at `file` and return it as a dict."""
    with open(file) as f:
        # safe_load: the configs only need plain scalars/lists/maps, and it
        # avoids arbitrary object construction plus the PyYAML deprecation
        # warning that bare yaml.load() without a Loader emits.
        conf = yaml.safe_load(f)
    return conf
# output : model output
# target : input's label
#
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    maxk = max(topk)
    batch_size = target.size(0)
    # Top-maxk class indices per sample, transposed so row j holds every
    # sample's (j+1)-th guess — aligning rows with the expanded target.
    _, top_idx = output.topk(maxk, 1, True, True)
    hits = top_idx.t().eq(target.view(1, -1).expand_as(top_idx.t()))
    # For each requested k, count hits within the first k guesses and
    # scale to a percentage of the batch.
    return [hits[:k].reshape(-1).float().sum(0).mul_(100.0 / batch_size)
            for k in topk]
#에러를 기본 Class로 설정
def precision(output, target, e=1e-3, target_class=0):
    """Precision (%) for `target_class`, treated as the positive label.

    e keeps the denominator non-zero; with no positive predictions at
    all the function reports a perfect 100%.
    """
    _, guesses = output.topk(1, 1, True, True)
    guesses = guesses.t()
    expanded = target.view(1, -1).expand_as(guesses)
    guesses = guesses.squeeze()
    expanded = expanded.squeeze()
    tp = 0
    fp = 0
    for i in range(len(guesses)):
        if guesses[i] == target[i] and target[i] == target_class:
            tp += 1  # true positive
        if guesses[i] == target_class and target[i] != target_class:
            fp += 1  # predicted the positive class but the label disagrees
    #logger.info("TP: %s, FP: %s" % (tp, fp))
    if tp + fp == 0 and tp == 0:
        return 100.
    return (tp / (tp + fp + e)) * 100.
# Recall with the error class (index 0) treated as the positive class.
def recall(output, target, e=1e-3, target_class=0):
    """Recall for ``target_class`` (class 0, the error class, by default).

    Args:
        output: model scores/logits of shape (batch, num_classes).
        target: ground-truth class indices of shape (batch,).
        e: small epsilon to guard the division.
        target_class: the class treated as positive.

    Returns:
        Recall in percent; 100.0 when no positive ground truth exists.
    """
    _, pred = output.topk(1, 1, True, True)
    pred = pred.t().reshape(-1)
    target = target.reshape(-1)
    # Vectorized counts (also fixes the batch-size-1 len() failure and the
    # "nagatives" typo of the original loop-based version).
    true_positives = int(((pred == target) & (target == target_class)).sum())
    false_negatives = int(((pred != target_class) & (target == target_class)).sum())
    if true_positives + false_negatives == 0:
        # no positive ground-truth samples: define recall as perfect
        return 100.
    return (true_positives / (true_positives + false_negatives + e)) * 100.
def printlog(string, logger, q):
    """Log ``string`` and, when a queue is supplied, also forward it to the UI.

    Args:
        string: message to emit.
        logger: logging.Logger used for normal log output.
        q: optional queue.Queue consumed by the GUI thread; may be None.
    """
    if q is not None:  # identity check is the correct Pythonic None test
        q.put(string)
    logger.info(string)
########################################################################
############### HELPERS FUNCTIONS FOR MODEL ARCHITECTURE ###############
########################################################################
# Parameters for the entire model (stem, all blocks, and head)
GlobalParams = collections.namedtuple('GlobalParams', [
    'batch_norm_momentum', 'batch_norm_epsilon', 'dropout_rate',
    'num_classes', 'width_coefficient', 'depth_coefficient',
    'depth_divisor', 'min_depth', 'drop_connect_rate', 'image_size'])
# Parameters for an individual model block
BlockArgs = collections.namedtuple('BlockArgs', [
    'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
    'expand_ratio', 'id_skip', 'stride', 'se_ratio'])
# Change namedtuple defaults: every field defaults to None so callers can
# construct partially-specified configurations.
GlobalParams.__new__.__defaults__ = (None,) * len(GlobalParams._fields)
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
class SwishImplementation(torch.autograd.Function):
    """Memory-efficient Swish: saves only the input and recomputes the
    sigmoid during backward instead of storing the activation."""
    @staticmethod
    def forward(ctx, i):
        result = i * torch.sigmoid(i)
        ctx.save_for_backward(i)
        return result
    @staticmethod
    def backward(ctx, grad_output):
        # saved_tensors replaces the long-deprecated saved_variables alias,
        # which has been removed in modern PyTorch.
        i = ctx.saved_tensors[0]
        sigmoid_i = torch.sigmoid(i)
        # d/dx [x * sigmoid(x)] = sigmoid(x) * (1 + x * (1 - sigmoid(x)))
        return grad_output * (sigmoid_i * (1 + i * (1 - sigmoid_i)))
class MemoryEfficientSwish(nn.Module):
    """Swish activation backed by the custom autograd function above."""
    def forward(self, x):
        # Delegate to the memory-efficient autograd implementation.
        activated = SwishImplementation.apply(x)
        return activated
class Swish(nn.Module):
    """Plain Swish activation, x * sigmoid(x) (export-friendly form)."""
    def forward(self, x):
        gate = torch.sigmoid(x)
        return x.mul(gate)
def round_filters(filters, global_params):
    """Scale ``filters`` by the width multiplier and round to the nearest
    multiple of the depth divisor (never shrinking by more than 10%)."""
    multiplier = global_params.width_coefficient
    if not multiplier:
        # width scaling disabled
        return filters
    divisor = global_params.depth_divisor
    floor_depth = global_params.min_depth or divisor
    scaled = filters * multiplier
    rounded = max(floor_depth, int(scaled + divisor / 2) // divisor * divisor)
    # prevent rounding down by more than 10%
    if rounded < 0.9 * scaled:
        rounded += divisor
    return int(rounded)
def round_repeats(repeats, global_params):
    """Scale a block's repeat count by the depth multiplier, rounding up."""
    depth_mult = global_params.depth_coefficient
    if not depth_mult:
        # depth scaling disabled
        return repeats
    return int(math.ceil(depth_mult * repeats))
def drop_connect(inputs, p, training):
    """Drop connect: randomly zero whole samples with probability ``p`` and
    rescale survivors by 1/keep_prob. Identity when not training."""
    if not training:
        return inputs
    keep_prob = 1 - p
    # one Bernoulli draw per sample, broadcast over channels and spatial dims
    noise = keep_prob + torch.rand(
        [inputs.shape[0], 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    mask = torch.floor(noise)
    return inputs / keep_prob * mask
def get_same_padding_conv2d(image_size=None):
    """Pick the 'SAME'-padding conv variant: static padding when the image
    size is known up front (required for ONNX export), dynamic otherwise."""
    return (Conv2dDynamicSamePadding if image_size is None
            else partial(Conv2dStaticSamePadding, image_size=image_size))
class Conv2dDynamicSamePadding(nn.Conv2d):
    """TensorFlow-style 'SAME' 2D convolution for arbitrary input sizes:
    the padding is recomputed on every forward pass."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, dilation=1, groups=1, bias=True):
        # padding=0 here; the actual padding is applied manually in forward()
        super().__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
        if len(self.stride) != 2:
            self.stride = [self.stride[0]] * 2
    def forward(self, x):
        in_h, in_w = x.size()[-2:]
        k_h, k_w = self.weight.size()[-2:]
        s_h, s_w = self.stride
        out_h, out_w = math.ceil(in_h / s_h), math.ceil(in_w / s_w)
        # total padding so the output keeps ceil(input / stride) spatial size
        extra_h = max((out_h - 1) * self.stride[0] + (k_h - 1) * self.dilation[0] + 1 - in_h, 0)
        extra_w = max((out_w - 1) * self.stride[1] + (k_w - 1) * self.dilation[1] + 1 - in_w, 0)
        if extra_h > 0 or extra_w > 0:
            # odd totals put the extra pixel on the right/bottom (TF rule)
            x = F.pad(x, [extra_w // 2, extra_w - extra_w // 2,
                          extra_h // 2, extra_h - extra_h // 2])
        return F.conv2d(x, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class Conv2dStaticSamePadding(nn.Conv2d):
    """TensorFlow-style 'SAME' 2D convolution for a fixed image size: the
    padding layer is computed once at construction time."""
    def __init__(self, in_channels, out_channels, kernel_size, image_size=None, **kwargs):
        super().__init__(in_channels, out_channels, kernel_size, **kwargs)
        if len(self.stride) != 2:
            self.stride = [self.stride[0]] * 2
        # The image size must be known up front to precompute the padding.
        assert image_size is not None
        in_h, in_w = image_size if type(image_size) == list else [image_size, image_size]
        k_h, k_w = self.weight.size()[-2:]
        s_h, s_w = self.stride
        out_h, out_w = math.ceil(in_h / s_h), math.ceil(in_w / s_w)
        extra_h = max((out_h - 1) * self.stride[0] + (k_h - 1) * self.dilation[0] + 1 - in_h, 0)
        extra_w = max((out_w - 1) * self.stride[1] + (k_w - 1) * self.dilation[1] + 1 - in_w, 0)
        if extra_h > 0 or extra_w > 0:
            self.static_padding = nn.ZeroPad2d(
                (extra_w // 2, extra_w - extra_w // 2, extra_h // 2, extra_h - extra_h // 2))
        else:
            # no padding needed: use a pass-through module
            self.static_padding = Identity()
    def forward(self, x):
        padded = self.static_padding(x)
        return F.conv2d(padded, self.weight, self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
class Identity(nn.Module):
    """No-op module used wherever an nn.Module placeholder is required."""
    def __init__(self):
        super(Identity, self).__init__()
    def forward(self, input):
        # Pass the tensor through unchanged.
        return input
########################################################################
############## HELPERS FUNCTIONS FOR LOADING MODEL PARAMS ##############
########################################################################
def efficientnet_params(model_name):
    """Map an EfficientNet model name to its scaling coefficients.

    Returns a (width, depth, resolution, dropout) tuple; raises KeyError for
    an unknown name.
    """
    return {
        'efficientnet-b0': (1.0, 1.0, 224, 0.2),
        'efficientnet-b1': (1.0, 1.1, 240, 0.2),
        'efficientnet-b2': (1.1, 1.2, 260, 0.3),
        'efficientnet-b3': (1.2, 1.4, 300, 0.3),
        'efficientnet-b4': (1.4, 1.8, 380, 0.4),
        'efficientnet-b5': (1.6, 2.2, 456, 0.4),
        'efficientnet-b6': (1.8, 2.6, 528, 0.5),
        'efficientnet-b7': (2.0, 3.1, 600, 0.5),
        'efficientnet-b8': (2.2, 3.6, 672, 0.5),
        'efficientnet-l2': (4.3, 5.3, 800, 0.5),
    }[model_name]
class BlockDecoder(object):
    """ Block Decoder for readability, straight from the official TensorFlow repository """
    @staticmethod
    def _decode_block_string(block_string):
        """Parse one block string (e.g. 'r1_k3_s11_e1_i32_o16_se0.25')
        into a BlockArgs namedtuple."""
        assert isinstance(block_string, str)
        ops = block_string.split('_')
        options = {}
        for op in ops:
            # split each token into its letter key and numeric value
            splits = re.split(r'(\d.*)', op)
            if len(splits) >= 2:
                key, value = splits[:2]
                options[key] = value
        # Check stride: one digit, or two identical digits
        assert (('s' in options and len(options['s']) == 1) or
                (len(options['s']) == 2 and options['s'][0] == options['s'][1]))
        return BlockArgs(
            kernel_size=int(options['k']),
            num_repeat=int(options['r']),
            input_filters=int(options['i']),
            output_filters=int(options['o']),
            expand_ratio=int(options['e']),
            id_skip=('noskip' not in block_string),
            se_ratio=float(options['se']) if 'se' in options else None,
            stride=[int(options['s'][0])])
    @staticmethod
    def _encode_block_string(block):
        """Encodes a block to a string (inverse of _decode_block_string)."""
        args = [
            'r%d' % block.num_repeat,
            'k%d' % block.kernel_size,
            # BlockArgs stores a one-element 'stride' list; the original read
            # a nonexistent 'strides' attribute and raised AttributeError.
            # decode() asserts both stride digits are equal, so repeating
            # stride[0] is the faithful inverse.
            's%d%d' % (block.stride[0], block.stride[0]),
            'e%s' % block.expand_ratio,
            'i%d' % block.input_filters,
            'o%d' % block.output_filters
        ]
        # guard against se_ratio=None before the numeric comparison,
        # which would raise TypeError on Python 3
        if block.se_ratio is not None and 0 < block.se_ratio <= 1:
            args.append('se%s' % block.se_ratio)
        if block.id_skip is False:
            args.append('noskip')
        return '_'.join(args)
    @staticmethod
    def decode(string_list):
        """
        Decodes a list of string notations to specify blocks inside the network.
        :param string_list: a list of strings, each string is a notation of block
        :return: a list of BlockArgs namedtuples of block args
        """
        assert isinstance(string_list, list)
        blocks_args = []
        for block_string in string_list:
            blocks_args.append(BlockDecoder._decode_block_string(block_string))
        return blocks_args
    @staticmethod
    def encode(blocks_args):
        """
        Encodes a list of BlockArgs to a list of strings.
        :param blocks_args: a list of BlockArgs namedtuples of block args
        :return: a list of strings, each string is a notation of block
        """
        block_strings = []
        for block in blocks_args:
            block_strings.append(BlockDecoder._encode_block_string(block))
        return block_strings
def efficientnet(width_coefficient=None, depth_coefficient=None, dropout_rate=0.2,
                 drop_connect_rate=0.2, image_size=None, num_classes=2):
    """Create the EfficientNet block args and global params.

    Returns:
        (blocks_args, global_params) describing the B0 baseline scaled by the
        given coefficients.
    """
    # Baseline EfficientNet-B0 architecture in string notation.
    block_strings = [
        'r1_k3_s11_e1_i32_o16_se0.25',
        'r2_k3_s22_e6_i16_o24_se0.25',
        'r2_k5_s22_e6_i24_o40_se0.25',
        'r3_k3_s22_e6_i40_o80_se0.25',
        'r3_k5_s11_e6_i80_o112_se0.25',
        'r4_k5_s22_e6_i112_o192_se0.25',
        'r1_k3_s11_e6_i192_o320_se0.25',
    ]
    decoded_blocks = BlockDecoder.decode(block_strings)
    params = GlobalParams(
        batch_norm_momentum=0.99,
        batch_norm_epsilon=1e-3,
        dropout_rate=dropout_rate,
        drop_connect_rate=drop_connect_rate,
        # data_format='channels_last' removed: always true in PyTorch
        num_classes=num_classes,
        width_coefficient=width_coefficient,
        depth_coefficient=depth_coefficient,
        depth_divisor=8,
        min_depth=None,
        image_size=image_size,
    )
    return decoded_blocks, params
def get_model_params(model_name, override_params):
    """Return (block args, global params) for a named EfficientNet variant."""
    if not model_name.startswith('efficientnet'):
        raise NotImplementedError('model name is not pre-defined: %s' % model_name)
    width, depth, size, dropout = efficientnet_params(model_name)
    # note: all models have drop connect rate = 0.2
    blocks_args, global_params = efficientnet(
        width_coefficient=width, depth_coefficient=depth,
        dropout_rate=dropout, image_size=size)
    if override_params:
        # _replace raises ValueError for fields not in GlobalParams, which is
        # the intended validation.
        global_params = global_params._replace(**override_params)
    return blocks_args, global_params
# Download URLs for ImageNet-pretrained checkpoints (standard training).
url_map = {
    'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b0-355c32eb.pth',
    'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b1-f1951068.pth',
    'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b2-8bb594d6.pth',
    'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b3-5fb5a3c3.pth',
    'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b4-6ed6700e.pth',
    'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b5-b6417697.pth',
    'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b6-c76e70fd.pth',
    'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/efficientnet-b7-dcc49843.pth',
}
# Checkpoints trained with adversarial propagation (AdvProp); note these
# expect different input preprocessing than the standard checkpoints.
url_map_advprop = {
    'efficientnet-b0': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b0-b64d5a18.pth',
    'efficientnet-b1': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b1-0f3ce85a.pth',
    'efficientnet-b2': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b2-6e9d97e5.pth',
    'efficientnet-b3': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b3-cdd7c0f4.pth',
    'efficientnet-b4': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b4-44fb3a87.pth',
    'efficientnet-b5': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b5-86493f6b.pth',
    'efficientnet-b6': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b6-ac80338e.pth',
    'efficientnet-b7': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b7-4652b6dd.pth',
    'efficientnet-b8': 'https://github.com/lukemelas/EfficientNet-PyTorch/releases/download/1.0/adv-efficientnet-b8-22a8fe65.pth',
}
def load_pretrained_weights(model, model_name, load_fc=True, advprop=False):
    """ Loads pretrained weights, and downloads if loading for the first time. """
    # AdvProp checkpoints use different preprocessing, hence separate URLs.
    urls = url_map_advprop if advprop else url_map
    state_dict = model_zoo.load_url(urls[model_name])
    if not load_fc:
        # drop the classifier weights so a differently-sized head can be used
        state_dict.pop('_fc.weight')
        state_dict.pop('_fc.bias')
        res = model.load_state_dict(state_dict, strict=False)
        assert set(res.missing_keys) == set(['_fc.weight', '_fc.bias']), 'issue loading pretrained weights'
    else:
        model.load_state_dict(state_dict)
    print('Loaded pretrained weights for {}'.format(model_name))
## noising ##
def stochastic_depth(inputs, is_training, stochastic_depth_rate=0.2):
    '''Apply stochastic depth: randomly zero whole samples and rescale
    survivors by 1/keep_prob. Identity at inference time.'''
    if not is_training:
        return inputs
    # Compute keep_prob
    # TODO(tanmingxing): add support for training progress.
    keep_prob = 1.0 - stochastic_depth_rate
    # Compute stochastic_depth tensor: one Bernoulli draw per sample.
    batch_size = inputs.shape[0]
    random_tensor = keep_prob
    # device= keeps this working for CUDA inputs; the original allocated on
    # CPU only, diverging from drop_connect above and crashing on GPU.
    random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=inputs.dtype,
                                device=inputs.device)
    binary_tensor = torch.floor(random_tensor)
    output = torch.div(inputs, keep_prob) * binary_tensor
    return output
\ No newline at end of file
No preview for this file type
from __future__ import print_function
from model import mobilenetv3
import os
import glob
import copy
import os.path as osp
import click
import cv2
import matplotlib.cm as cm
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import models, transforms
from visualize.grad_cam_utils import (
BackPropagation,
Deconvnet,
GradCAM,
GuidedBackPropagation,
occlusion_sensitivity,
)
# if a model includes LSTM, such as in image captioning,
# torch.backends.cudnn.enabled = False
def get_device(cuda):
    """Return torch.device('cuda') when requested and available, else the CPU
    device, printing which one was selected."""
    use_cuda = cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")
    if use_cuda:
        current = torch.cuda.current_device()
        print("Device:", torch.cuda.get_device_name(current))
    else:
        print("Device: CPU")
    return device
def change_image_path(image_paths):
    """Expand a list of directories into the flat list of all file paths
    found by recursively walking each directory."""
    collected = []
    for root_dir in image_paths:
        normalized = os.path.normpath(root_dir)
        for dirpath, _dirnames, filenames in os.walk(normalized):
            # keep the original '/'-joined form used throughout this script
            collected.extend(dirpath + "/" + name for name in filenames)
    return collected
def load_images(image_paths, normalize_factor):
    """Preprocess every path in ``image_paths``.

    Returns:
        (tensor images, raw resized images, the input paths).
    """
    images = []
    raw_images = []
    for image_path in image_paths:
        tensor, raw = preprocess(image_path, normalize_factor)
        images.append(tensor)
        raw_images.append(raw)
    return images, raw_images, image_paths
def get_classtable():
    """Return the ordered class names used by the binary classifier."""
    # Index 0 is the defect ("Error") class, index 1 the good ("Normal") class.
    return ["Error", "Normal"]
def preprocess(image_path, normalize_factor):
    """Read an image as grayscale, resize to 128x128, and convert to a tensor.

    Args:
        image_path: path to the image file.
        normalize_factor: optional (mean, std) pair; when given, the tensor
            is normalized after ToTensor, otherwise only ToTensor is applied.

    Returns:
        (tensor_image, raw_resized_image); the raw image is kept for overlays.
    """
    raw_image = cv2.imread(image_path, 0)  # flag 0 -> load as grayscale
    raw_image = cv2.resize(raw_image, (128,) * 2)
    # Build the pipeline once instead of duplicating both transform branches;
    # 'is not None' replaces the non-idiomatic '!= None' checks.
    steps = [transforms.ToTensor()]
    if normalize_factor is not None:
        mean = normalize_factor[0]
        std = normalize_factor[1]
        steps.append(transforms.Normalize(mean=mean, std=std))
    # [..., ::-1] reverses the last axis (BGR->RGB convention); copy() is
    # required because torchvision rejects negative-stride arrays.
    image = transforms.Compose(steps)(raw_image[..., ::-1].copy())
    return image, raw_image
def save_gradient(filename, gradient):
    """Write a gradient tensor to ``filename`` rescaled into [0, 255]."""
    arr = gradient.cpu().numpy().transpose(1, 2, 0)  # CHW -> HWC
    arr -= arr.min()
    # NOTE(review): divides by zero for an all-constant gradient — confirm
    # inputs always vary.
    arr /= arr.max()
    arr *= 255.0
    cv2.imwrite(filename, np.uint8(arr))
def save_gradcam(filename, gcam, raw_image, paper_cmap=False):
    """Overlay a Grad-CAM heatmap on ``raw_image`` and save it.

    Args:
        filename: output path.
        gcam: normalized CAM tensor on any device.
        raw_image: original image array to blend with.
        paper_cmap: when True, use the alpha-blended style from the paper.
    """
    gcam = gcam.cpu().numpy()
    cmap = cm.jet_r(gcam)[..., :3] * 255.0
    if paper_cmap:
        alpha = gcam[..., None]
        gcam = alpha * cmap + (1 - alpha) * raw_image
    else:
        # np.float64 replaces the np.float alias, which was removed in
        # NumPy 1.24 and made this line crash on modern installs.
        gcam = (cmap.astype(np.float64) + raw_image.astype(np.float64)) / 2
    cv2.imwrite(filename, np.uint8(gcam))
def save_sensitivity(filename, maps):
    """Render an occlusion-sensitivity map with a blue/red colormap and save it."""
    data = maps.cpu().numpy()
    # symmetric scaling so a score change of 0 maps to the colormap middle
    scale = max(data[data > 0].max(), -data[data <= 0].min())
    data = data / scale * 0.5 + 0.5
    data = np.uint8(cm.bwr_r(data)[..., :3] * 255.0)
    data = cv2.resize(data, (224, 224), interpolation=cv2.INTER_NEAREST)
    cv2.imwrite(filename, data)
# torchvision models
# All lowercase, non-dunder, callable entries of torchvision.models (i.e.
# the model constructor functions); used to validate the --arch CLI option.
model_names = sorted(
    name
    for name in models.__dict__
    if name.islower() and not name.startswith("__") and callable(models.__dict__[name])
)
@click.group()
@click.pass_context
def main(ctx):
    """CLI entry group: prints which subcommand (demo) is being invoked."""
    print("Mode:", ctx.invoked_subcommand)
def make_grad_cam(image_paths, output_dir, model, normalize_factor, cam_class, args, target_layer="module.features.5", arch="MobilenetV3", topk=1, cuda=True):
    """
    Visualize model responses given multiple images.

    Globs every file directly under ``image_paths``, runs vanilla backprop
    followed by Grad-CAM / Guided Backpropagation for the top-k predictions,
    and writes the Grad-CAM overlay PNGs into ``output_dir``.

    Args:
        image_paths: directory containing the input images (globbed as "<dir>/*").
        output_dir: destination for overlay PNGs (created if missing).
        model: the classification model to explain.
        normalize_factor: optional (mean, std) forwarded to preprocessing.
        cam_class: class-name filter (currently unused; filter line commented out).
        args: dict-like config; only args["task"] is read, to skip the cam subdir.
        target_layer: module name whose activations are visualized.
        arch: architecture label used only in output filenames.
        topk: number of top predictions to visualize per image.
        cuda: use GPU when available.
    """
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    image_paths = image_paths + "/*"
    image_paths = glob.glob(image_paths)
    if len(image_paths) == 0:
        print("There's no images in folder!")
        return
    device = get_device(cuda)
    # Synset words
    classes = get_classtable()
    # Model from torchvision
    #model = models.__dict__[arch](pretrained=True)
    model.to(device)
    model.eval()
    image_paths.sort()
    # Drop the cam output subdirectory itself from the glob results.
    # NOTE(review): list.remove raises ValueError if this exact path is not
    # present — presumably the caller always creates it first; confirm.
    image_paths.remove(f'eval_results/{args["task"]}/error_case/cam')
    # Images
    images, raw_images, _ = load_images(image_paths, normalize_factor)
    images = torch.stack(images).to(device)
    """
    Common usage:
    1. Wrap your model with visualization classes defined in grad_cam.py
    2. Run forward() with images
    3. Run backward() with a list of specific classes
    4. Run generate() to export results
    """
    # =========================================================================
    #print("Vanilla Backpropagation:")
    bp = BackPropagation(model=model)
    probs, ids = bp.forward(images) # sorted (descending) probs and class ids
    for i in range(topk):
        bp.backward(ids=ids[:, [i]])
        gradients = bp.generate()
    # Remove all the hook function in the "model"
    bp.remove_hook()
    # =========================================================================
    print("Grad-CAM/Guided Backpropagation/Guided Grad-CAM:")
    gcam = GradCAM(model=model)
    _ = gcam.forward(images)
    gbp = GuidedBackPropagation(model=model)
    _ = gbp.forward(images)
    for i in range(topk):
        # Guided Backpropagation
        gbp.backward(ids=ids[:, [i]])
        gradients = gbp.generate()
        # Grad-CAM
        gcam.backward(ids=ids[:, [i]])
        regions = gcam.generate(target_layer=target_layer)
        for j in range(len(images)):
            #print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))
            # Grad-CAM
            #if classes[ids[j, i]] == cam_class:
            print(image_paths[j])
            # Filename pattern: <image>-<arch>-gradcam-<layer>-<class>.png
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(
                        image_paths[j].split('/')[-1], arch, target_layer, classes[ids[j, i]]
                    ),
                ),
                gcam=regions[j, 0],
                # expand to HxWx1 so the grayscale raw image blends with the cam
                raw_image=np.expand_dims(raw_images[j],axis=2),
            )
@main.command()
@click.option("-i", "--image-paths", type=str, multiple=True, required=True)
@click.option("-o", "--output-dir", type=str, default="./results")
@click.option("--cuda/--cpu", default=True)
def demo2(image_paths, output_dir, cuda):
    """
    Generate Grad-CAM at different layers of ResNet-152
    """
    device = get_device(cuda)
    # Synset words
    classes = get_classtable()
    # Model
    model = models.resnet152(pretrained=True)
    model.to(device)
    model.eval()
    # The four residual layers
    target_layers = ["relu", "layer1", "layer2", "layer3", "layer4"]
    target_class = 243 # "bull mastif"
    # Images
    # BUG FIX: load_images() takes (image_paths, normalize_factor); the
    # original call omitted the second argument and raised TypeError.
    # None keeps plain ToTensor preprocessing.
    images, raw_images, _ = load_images(image_paths, None)
    images = torch.stack(images).to(device)
    gcam = GradCAM(model=model)
    probs, ids = gcam.forward(images)
    ids_ = torch.LongTensor([[target_class]] * len(images)).to(device)
    gcam.backward(ids=ids_)
    for target_layer in target_layers:
        print("Generating Grad-CAM @{}".format(target_layer))
        # Grad-CAM
        regions = gcam.generate(target_layer=target_layer)
        for j in range(len(images)):
            print(
                "\t#{}: {} ({:.5f})".format(
                    j, classes[target_class], float(probs[ids == target_class])
                )
            )
            save_gradcam(
                filename=osp.join(
                    output_dir,
                    "{}-{}-gradcam-{}-{}.png".format(
                        j, "resnet152", target_layer, classes[target_class]
                    ),
                ),
                gcam=regions[j, 0],
                raw_image=raw_images[j],
            )
@main.command()
@click.option("-i", "--image-paths", type=str, multiple=True, required=True)
@click.option("-a", "--arch", type=click.Choice(model_names), required=True)
@click.option("-k", "--topk", type=int, default=3)
@click.option("-s", "--stride", type=int, default=1)
@click.option("-b", "--n-batches", type=int, default=128)
@click.option("-o", "--output-dir", type=str, default="./results")
@click.option("--cuda/--cpu", default=True)
def demo3(image_paths, arch, topk, stride, n_batches, output_dir, cuda):
    """
    Generate occlusion sensitivity maps
    """
    device = get_device(cuda)
    # Synset words
    classes = get_classtable()
    # Model from torchvision
    model = models.__dict__[arch](pretrained=True)
    model = torch.nn.DataParallel(model)
    model.to(device)
    model.eval()
    # Images
    # BUG FIX: load_images() requires a normalize_factor argument; the
    # original call omitted it and raised TypeError. None applies ToTensor only.
    images, _, _ = load_images(image_paths, None)
    images = torch.stack(images).to(device)
    print("Occlusion Sensitivity:")
    patche_sizes = [10, 15, 25, 35, 45, 90]
    logits = model(images)
    probs = F.softmax(logits, dim=1)
    probs, ids = probs.sort(dim=1, descending=True)
    for i in range(topk):
        for p in patche_sizes:
            print("Patch:", p)
            sensitivity = occlusion_sensitivity(
                model, images, ids[:, [i]], patch=p, stride=stride, n_batches=n_batches
            )
            # Save results as image files
            for j in range(len(images)):
                print("\t#{}: {} ({:.5f})".format(j, classes[ids[j, i]], probs[j, i]))
                save_sensitivity(
                    filename=osp.join(
                        output_dir,
                        "{}-{}-sensitivity-{}-{}.png".format(
                            j, arch, p, classes[ids[j, i]]
                        ),
                    ),
                    maps=sensitivity[j],
                )
# Standard script entry point: dispatch to the click command group above.
if __name__ == "__main__":
    main()
#!/usr/bin/env python
# coding: utf-8
#
# Author: Kazuto Nakashima
# URL: http://kazuto1011.github.io
# Created: 2017-05-26
from collections import Sequence
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from tqdm import tqdm
class _BaseWrapper(object):
def __init__(self, model):
super(_BaseWrapper, self).__init__()
self.device = next(model.parameters()).device
self.model = model
self.handlers = [] # a set of hook function handlers
def _encode_one_hot(self, ids):
one_hot = torch.zeros_like(self.logits).to(self.device)
one_hot.scatter_(1, ids, 1.0)
return one_hot
def forward(self, image):
self.image_shape = image.shape[2:] #채널 사이즈
self.logits = self.model(image)
self.probs = F.softmax(self.logits, dim=1)
return self.probs.sort(dim=1, descending=True) # ordered results
def backward(self, ids):
"""
Class-specific backpropagation
"""
one_hot = self._encode_one_hot(ids)
self.model.zero_grad()
self.logits.backward(gradient=one_hot, retain_graph=True)
def generate(self):
raise NotImplementedError
def remove_hook(self):
"""
Remove all the forward/backward hook functions
"""
for handle in self.handlers:
handle.remove()
class BackPropagation(_BaseWrapper):
    """Vanilla backprop: gradients of the class score w.r.t. the input image."""
    def forward(self, image):
        # Track gradients on the input so generate() can read image.grad.
        self.image = image.requires_grad_()
        return super(BackPropagation, self).forward(self.image)
    def generate(self):
        grad_copy = self.image.grad.clone()
        # reset so repeated backward() calls do not accumulate
        self.image.grad.zero_()
        return grad_copy
class GuidedBackPropagation(BackPropagation):
    """
    "Striving for Simplicity: the All Convolutional Net"
    https://arxiv.org/pdf/1412.6806.pdf
    Look at Figure 1 on page 8.
    """
    def __init__(self, model):
        super(GuidedBackPropagation, self).__init__(model)
        def backward_hook(module, grad_in, grad_out):
            # Cut off negative gradients flowing backward through ReLUs.
            if isinstance(module, nn.ReLU):
                return (F.relu(grad_in[0]),)
        for _name, submodule in self.model.named_modules():
            self.handlers.append(submodule.register_backward_hook(backward_hook))
class Deconvnet(BackPropagation):
    """
    "Striving for Simplicity: the All Convolutional Net"
    https://arxiv.org/pdf/1412.6806.pdf
    Look at Figure 1 on page 8.
    """
    def __init__(self, model):
        super(Deconvnet, self).__init__(model)
        def backward_hook(module, grad_in, grad_out):
            # Rectify the *output* gradient (ignoring the ReLU's own mask).
            if isinstance(module, nn.ReLU):
                return (F.relu(grad_out[0]),)
        for _name, submodule in self.model.named_modules():
            self.handlers.append(submodule.register_backward_hook(backward_hook))
class GradCAM(_BaseWrapper):
    """
    "Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization"
    https://arxiv.org/pdf/1610.02391.pdf
    Look at Figure 2 on page 4
    """
    def __init__(self, model, candidate_layers=None):
        super(GradCAM, self).__init__(model)
        self.fmap_pool = {}  # layer name -> forward feature maps
        self.grad_pool = {}  # layer name -> gradients w.r.t. those maps
        self.candidate_layers = candidate_layers  # list
        def save_fmaps(key):
            # Factory so each hook closes over its own layer name.
            def forward_hook(module, input, output):
                self.fmap_pool[key] = output.detach()
            return forward_hook
        def save_grads(key):
            def backward_hook(module, grad_in, grad_out):
                self.grad_pool[key] = grad_out[0].detach()
            return backward_hook
        # If any candidates are not specified, the hook is registered to all the layers.
        for name, module in self.model.named_modules():
            if self.candidate_layers is None or name in self.candidate_layers:
                self.handlers.append(module.register_forward_hook(save_fmaps(name)))
                self.handlers.append(module.register_backward_hook(save_grads(name)))
    def _find(self, pool, target_layer):
        # Look up a recorded tensor by layer name; fail loudly on a bad name.
        if target_layer in pool.keys():
            return pool[target_layer]
        else:
            raise ValueError("Invalid layer name: {}".format(target_layer))
    def generate(self, target_layer):
        """Build the class activation map for ``target_layer`` (call after
        forward() and backward())."""
        fmaps = self._find(self.fmap_pool, target_layer)
        grads = self._find(self.grad_pool, target_layer)
        # channel weights = global-average-pooled gradients
        weights = F.adaptive_avg_pool2d(grads, 1)
        gcam = torch.mul(fmaps, weights).sum(dim=1, keepdim=True)
        gcam = F.relu(gcam)
        # upsample back to the input resolution recorded by forward()
        gcam = F.interpolate(
            gcam, self.image_shape, mode="bilinear", align_corners=False
        )
        B, C, H, W = gcam.shape
        # per-image min-max normalization to [0, 1]
        gcam = gcam.view(B, -1)
        gcam -= gcam.min(dim=1, keepdim=True)[0]
        gcam /= gcam.max(dim=1, keepdim=True)[0]
        gcam = gcam.view(B, C, H, W)
        return gcam
def occlusion_sensitivity(
    model, images, ids, mean=None, patch=35, stride=1, n_batches=128
):
    """
    "Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization"
    https://arxiv.org/pdf/1610.02391.pdf
    Look at Figure A5 on page 17
    Originally proposed in:
    "Visualizing and Understanding Convolutional Networks"
    https://arxiv.org/abs/1311.2901

    Slides an occluding patch over the (padded) images and records how much
    the target-class score changes relative to the unoccluded baseline.
    """
    # collections.Sequence was removed in Python 3.10; use collections.abc.
    from collections.abc import Sequence

    # The original called torch.set_grad_enabled(False) and never restored
    # it, leaving autograd disabled process-wide; restore the caller's mode.
    prev_grad_state = torch.is_grad_enabled()
    torch.set_grad_enabled(False)
    try:
        model.eval()
        mean = mean if mean else 0
        patch_H, patch_W = patch if isinstance(patch, Sequence) else (patch, patch)
        pad_H, pad_W = patch_H // 2, patch_W // 2
        # Padded image
        images = F.pad(images, (pad_W, pad_W, pad_H, pad_H), value=mean)
        B, _, H, W = images.shape
        new_H = (H - patch_H) // stride + 1
        new_W = (W - patch_W) // stride + 1
        # Prepare sampling grids
        # NOTE(review): grid_w is incremented before being recorded, so anchors
        # are offset by one stride horizontally; kept as-is to preserve the
        # original output — confirm against the upstream implementation.
        anchors = []
        grid_h = 0
        while grid_h <= H - patch_H:
            grid_w = 0
            while grid_w <= W - patch_W:
                grid_w += stride
                anchors.append((grid_h, grid_w))
            grid_h += stride
        # Baseline score without occlusion
        baseline = model(images).detach().gather(1, ids)
        # Compute per-pixel logits
        scoremaps = []
        for i in tqdm(range(0, len(anchors), n_batches), leave=False):
            batch_images = []
            batch_ids = []
            for grid_h, grid_w in anchors[i : i + n_batches]:
                images_ = images.clone()
                # Occlude one patch with the fill value
                images_[..., grid_h : grid_h + patch_H, grid_w : grid_w + patch_W] = mean
                batch_images.append(images_)
                batch_ids.append(ids)
            batch_images = torch.cat(batch_images, dim=0)
            batch_ids = torch.cat(batch_ids, dim=0)
            scores = model(batch_images).detach().gather(1, batch_ids)
            scoremaps += list(torch.split(scores, B))
        # Score change per occlusion position, reshaped to the sampling grid
        diffmaps = torch.cat(scoremaps, dim=1) - baseline
        diffmaps = diffmaps.view(B, new_H, new_W)
        return diffmaps
    finally:
        torch.set_grad_enabled(prev_grad_state)
No preview for this file type