조현아

backup acc100 prob

@@ -12,13 +12,15 @@ from utils import *
 # command
 # python eval.py --model_path='logs/April_16_00:26:10__resnet50__None/'
-def eval(model_path):
+def eval(model_path, num_data):
     print('\n[+] Parse arguments')
     kwargs_path = os.path.join(model_path, 'kwargs.json')
     kwargs = json.loads(open(kwargs_path).read())
     args, kwargs = parse_args(kwargs)
+    args.batch_size = num_data
     pprint(args)

     device = torch.device('cuda' if args.use_cuda else 'cpu')

     print('\n[+] Create network')
     model = select_model(args)
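
Note: passing num_data and setting args.batch_size = num_data makes the test DataLoader deliver the entire split as a single batch, so the counters from one pass are already whole-set totals. A minimal sketch of the idea, with a stand-in dataset and model (nothing below is the repo's code):

    import torch
    from torch.utils.data import DataLoader, TensorDataset

    # Stand-in data: 100 samples, 10 classes.
    dataset = TensorDataset(torch.randn(100, 8), torch.randint(0, 10, (100,)))
    loader = DataLoader(dataset, batch_size=len(dataset))  # one batch == whole split

    model = torch.nn.Linear(8, 10)
    model.eval()
    with torch.no_grad():
        images, target = next(iter(loader))   # the single batch holds every sample
        output = model(images)
        top1 = (output.argmax(dim=1) == target).float().mean()
    print('Acc@1 : {:.3f}%'.format(top1.item() * 100))

A single batch over the full test split only fits in memory for small datasets; for anything larger, accumulating per-batch counts (as validate() does further down) is the safer pattern.
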
@@ -45,8 +47,9 @@ def eval(model_path):
     print('\n[+] Valid results')
     print('    Acc@1 : {:.3f}%'.format(_test_res[0].data.cpu().numpy()[0]*100))
     print('    Acc@5 : {:.3f}%'.format(_test_res[1].data.cpu().numpy()[0]*100))
-    print('    Loss : {:.3f}'.format(_test_res[2].data))
-    print('    Infer Time(per image) : {:.3f}ms'.format(_test_res[3]*1000 / len(test_dataset)))
+    print('    Acc_all : {:.3f}%'.format(_test_res[2].data.cpu().numpy()[0]*100))
+    print('    Loss : {:.3f}'.format(_test_res[3].data))
+    print('    Infer Time(per image) : {:.3f}ms'.format(_test_res[4]*1000 / len(test_dataset)))

     writer.close()
@@ -69,15 +69,13 @@ def train(**kwargs):
                 step, args.max_step, current_epoch, max_epoch, (time.time()-start_t)/60, optimizer.param_groups[0]['lr']))
             writer.add_scalar('train/learning_rate', optimizer.param_groups[0]['lr'], global_step=step)
             writer.add_scalar('train/acc1', _train_res[0], global_step=step)
-            writer.add_scalar('train/acc5', _train_res[1], global_step=step)
-            writer.add_scalar('train/loss', _train_res[2], global_step=step)
-            writer.add_scalar('train/forward_time', _train_res[3], global_step=step)
-            writer.add_scalar('train/backward_time', _train_res[4], global_step=step)
+            writer.add_scalar('train/loss', _train_res[1], global_step=step)
+            writer.add_scalar('train/forward_time', _train_res[2], global_step=step)
+            writer.add_scalar('train/backward_time', _train_res[3], global_step=step)
             print('    Acc@1 : {:.3f}%'.format(_train_res[0].data.cpu().numpy()[0]*100))
-            print('    Acc@5 : {:.3f}%'.format(_train_res[1].data.cpu().numpy()[0]*100))
-            print('    Loss : {}'.format(_train_res[2].data))
-            print('    FW Time : {:.3f}ms'.format(_train_res[3]*1000))
-            print('    BW Time : {:.3f}ms'.format(_train_res[4]*1000))
+            print('    Loss : {}'.format(_train_res[1].data))
+            print('    FW Time : {:.3f}ms'.format(_train_res[2]*1000))
+            print('    BW Time : {:.3f}ms'.format(_train_res[3]*1000))

         if step % args.val_step == args.val_step-1:
             # print("\nstep, args.val_step: ", step, args.val_step)
@@ -85,11 +83,9 @@ def train(**kwargs):
                 _valid_res = validate(args, model, criterion, valid_loader, step, writer)
                 print('\n[+] Valid results')
                 writer.add_scalar('valid/acc1', _valid_res[0], global_step=step)
-                writer.add_scalar('valid/acc5', _valid_res[1], global_step=step)
-                writer.add_scalar('valid/loss', _valid_res[2], global_step=step)
+                writer.add_scalar('valid/loss', _valid_res[1], global_step=step)
                 print('    Acc@1 : {:.3f}%'.format(_valid_res[0].data.cpu().numpy()[0]*100))
-                print('    Acc@5 : {:.3f}%'.format(_valid_res[1].data.cpu().numpy()[0]*100))
-                print('    Loss : {}'.format(_valid_res[2].data))
+                print('    Loss : {}'.format(_valid_res[1].data))

                 if _valid_res[0] >= best_acc:
                     best_acc = _valid_res[0]
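
Note: because the result tuples shrank, every positional index into _train_res, _valid_res, and _test_res had to be shifted by hand in several places. A namedtuple would make those call sites robust to reordering; this is a hypothetical refactor, not part of the commit:

    from collections import namedtuple

    TrainResult = namedtuple('TrainResult', 'acc1 loss forward_t backward_t')

    res = TrainResult(acc1=0.91, loss=0.34, forward_t=0.012, backward_t=0.025)
    print('    Loss : {}'.format(res.loss))                       # field names survive
    print('    FW Time : {:.3f}ms'.format(res.forward_t * 1000))  # any reordering
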
@@ -33,6 +33,26 @@ TEST_TARGET_PATH = '/content/drive/My Drive/CD2 Project/data/test_nonaug_classif
 current_epoch = 0

+def split_dataset(args, dataset, k):
+    # load dataset
+    X = list(range(len(dataset)))
+    Y = dataset.targets
+
+    # split to k-fold
+    assert len(X) == len(Y)
+
+    def _it_to_list(_it):
+        return list(zip(*list(_it)))
+
+    sss = StratifiedShuffleSplit(n_splits=k, random_state=args.seed, test_size=0.1)
+    Dm_indexes, Da_indexes = _it_to_list(sss.split(X, Y))
+
+    return Dm_indexes, Da_indexes
+

 def concat_image_features(image, features, max_features=3):
     _, h, w = image.shape
     #print("\nfsize: ", features.size()) # (1, 240, 240)
@@ -222,14 +242,6 @@ def get_dataloader(args, dataset, shuffle=False, pin_memory=True):
     return data_loader

-def get_aug_dataloader(args, dataset, shuffle=False, pin_memory=True):
-    data_loader = torch.utils.data.DataLoader(dataset,
-                                              batch_size=args.batch_size,
-                                              shuffle=shuffle,
-                                              num_workers=args.num_workers,
-                                              pin_memory=pin_memory)
-    return data_loader
-

 def get_inf_dataloader(args, dataset):
     global current_epoch
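
Note: get_aug_dataloader was identical to get_dataloader line for line, so the deletion loses nothing; if any call sites remain, they can switch over directly (aug_dataset is a placeholder name):

    aug_loader = get_dataloader(args, aug_dataset, shuffle=True)  # replaces get_aug_dataloader
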
@@ -268,9 +280,9 @@ def train_step(args, model, optimizer, scheduler, criterion, batch, step, writer
     loss = criterion(output, target)

     # measure accuracy and record loss
-    acc1, acc5 = accuracy(output, target, topk=(1, 5))
+    acc1 = accuracy(output, target, topk=(1, ))[0]
     acc1 /= images.size(0)
-    acc5 /= images.size(0)

     # compute gradient and do SGD step
     optimizer.zero_grad()
@@ -287,10 +299,10 @@ def train_step(args, model, optimizer, scheduler, criterion, batch, step, writer
     #         writer.add_image(tag,
     #                 concat_image_features(images[j], first[j]), global_step=step)

-    return acc1, acc5, loss, forward_t, backward_t
+    return acc1, loss, forward_t, backward_t

+#_acc1, _acc5 = accuracy(output, target, topk=(1, 5))
+#_acc1= accuracy(output, target, topk=(1,))

 def accuracy(output, target, topk=(1,)):
     """Computes the accuracy over the k top predictions for the specified values of k"""
     with torch.no_grad():
@@ -301,7 +313,9 @@ def accuracy(output, target, topk=(1,)):
         pred = pred.t()
         correct = pred.eq(target.view(1, -1).expand_as(pred))

         # print("\noutout: ", output.size()) #(32, 1000)
+        # print("\npred: ", pred.size()) #(5, 32)
+        # print("\ncorrect: ", correct.size()) #(5, 32)

         res = []
         for k in topk:
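
Note: with topk=(1, ) the function returns a one-element list, hence the [0] at both call sites. The snippet below reconstructs the function from the context lines above to check the shapes; the correct_k reduction is an assumption (the callers divide by batch size afterwards, so accuracy() here appears to return raw correct counts rather than the torchvision-style percentage):

    import torch

    def accuracy(output, target, topk=(1,)):
        """Top-k correct counts (assumed: raw counts, normalized by the caller)."""
        with torch.no_grad():
            maxk = max(topk)
            _, pred = output.topk(maxk, 1, True, True)
            pred = pred.t()
            correct = pred.eq(target.view(1, -1).expand_as(pred))
            res = []
            for k in topk:
                correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
                res.append(correct_k)
            return res

    output = torch.tensor([[0.1, 0.9], [0.8, 0.2]])   # 2 samples, 2 classes
    target = torch.tensor([1, 1])
    acc1 = accuracy(output, target, topk=(1, ))[0]    # one-element list -> [0]
    acc1 /= output.size(0)                            # normalize by batch size
    print(acc1)                                       # tensor([0.5000])
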
@@ -313,7 +327,7 @@ def validate(args, model, criterion, valid_loader, step, writer, device=None):

     # switch to evaluate mode
     model.eval()

-    acc1, acc5 = 0, 0
+    acc1 = 0
     samples = 0
     infer_t = 0
@@ -335,13 +349,12 @@ def validate(args, model, criterion, valid_loader, step, writer, device=None):
             infer_t += time.time() - start_t

             # measure accuracy and record loss
-            _acc1, _acc5 = accuracy(output, target, topk=(1, 5))
+            _acc1 = accuracy(output, target, topk=(1, ))[0]
             acc1 += _acc1
-            acc5 += _acc5
             samples += images.size(0)
+            #print("\nsamples: ", samples) 4640

     acc1 /= samples
-    acc5 /= samples

     # if writer:
     #     n_imgs = min(images.size(0), 10)
@@ -349,4 +362,4 @@ def validate(args, model, criterion, valid_loader, step, writer, device=None):
     #     writer.add_image('valid/input_image',
     #             concat_image_features(images[j], first[j]), global_step=step)

-    return acc1, acc5, loss, infer_t
+    return acc1, loss, infer_t
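
Note: the loop sums raw correct counts per batch and divides once by the total sample count, which is why accuracy() must return counts rather than ratios. A toy trace of that bookkeeping (stand-in logits, not the repo's model):

    import torch

    batches = [
        (torch.tensor([[0.9, 0.1], [0.2, 0.8]]), torch.tensor([0, 1])),  # 2/2 correct
        (torch.tensor([[0.3, 0.7]]),             torch.tensor([0])),     # 0/1 correct
    ]

    acc1, samples = 0, 0
    for output, target in batches:
        acc1 += (output.argmax(dim=1) == target).float().sum()  # raw correct count
        samples += target.size(0)
    acc1 /= samples                      # one normalization over the whole split
    print(acc1)                          # tensor(0.6667)
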