Showing 3 changed files with 11 additions and 5 deletions
File 1 of 3:

@@ -5,6 +5,7 @@ from pprint import pprint
 
 import torch
 import torch.nn as nn
+from torch.utils.tensorboard import SummaryWriter
 
 from utils import *
 
@@ -35,7 +36,9 @@ def eval(model_path):
     test_loader = iter(get_dataloader(args, test_dataset))
 
     print('\n[+] Start testing')
-    _test_res = validate(args, model, criterion, test_loader, step=0, writer=None)
+    log_dir = os.path.join('/content/drive/My Drive/CD2 Project/runs', model_name)
+    writer = SummaryWriter(log_dir=log_dir)
+    _test_res = validate(args, model, criterion, test_loader, step=0, writer=writer)
 
     print('\n[+] Valid results')
     print(' Acc@1 : {:.3f}%'.format(_test_res[0].data.cpu().numpy()[0]*100))
@@ -43,6 +46,7 @@ def eval(model_path):
     print(' Loss : {:.3f}'.format(_test_res[2].data))
     print(' Infer Time(per image) : {:.3f}ms'.format(_test_res[3]*1000 / len(test_dataset)))
 
+    writer.close()
 
 if __name__ == '__main__':
     fire.Fire(eval)
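For context, a minimal sketch of how a SummaryWriter like the one created above is typically used to record evaluation metrics; the log directory, metric values, and scalar tags below are placeholders, not taken from this change:

import torch
from torch.utils.tensorboard import SummaryWriter

# Sketch (assumed usage): write metrics returned by an eval run to TensorBoard.
writer = SummaryWriter(log_dir='runs/example')               # placeholder log_dir
acc1, acc5, loss = 0.91, 0.99, 0.31                          # placeholder metric values
writer.add_scalar('test/acc1', acc1, global_step=0)          # tag names are illustrative
writer.add_scalar('test/acc5', acc5, global_step=0)
writer.add_scalar('test/loss', loss, global_step=0)
writer.close()  # flush pending events to disk so they appear in TensorBoard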
File 2 of 3:

@@ -54,6 +54,9 @@ def train(**kwargs):
     if torch.cuda.device_count() > 1:
         print('\n[+] Use {} GPUs'.format(torch.cuda.device_count()))
         model = nn.DataParallel(model)
+
+    print('\n[+] Use {} GPUs'.format(torch.cuda.device_count()))
+    print('\n[+] Using GPU: {} '.format(torch.cuda.get_device_name(0)))
 
     start_t = time.time()
     for step in range(args.start_step, args.max_step):
@@ -86,7 +89,7 @@ def train(**kwargs):
         print(' Acc@5 : {:.3f}%'.format(_valid_res[1].data.cpu().numpy()[0]*100))
         print(' Loss : {}'.format(_valid_res[2].data))
 
-        if _valid_res[0] > best_acc:
+        if _valid_res[0] >= best_acc:
             best_acc = _valid_res[0]
             torch.save(model.state_dict(), os.path.join(log_dir, "model","model.pt"))
             print('\n[+] Model saved')
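The comparison change above means a validation run that ties the current best accuracy now also refreshes the saved checkpoint. A minimal sketch of this keep-best pattern, with illustrative names that are not taken from this repository:

import os
import torch
import torch.nn as nn

def save_if_best(model, valid_acc, best_acc, log_dir):
    # With '>=' a tie overwrites the stored checkpoint with the newer weights;
    # with '>' only a strict improvement would.
    if valid_acc >= best_acc:
        best_acc = valid_acc
        os.makedirs(os.path.join(log_dir, 'model'), exist_ok=True)
        torch.save(model.state_dict(), os.path.join(log_dir, 'model', 'model.pt'))
    return best_acc

# Illustrative usage with a tiny placeholder model:
best_acc = save_if_best(nn.Linear(4, 2), valid_acc=0.80, best_acc=0.80, log_dir='runs/example')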
File 3 of 3:

@@ -86,7 +86,7 @@ def concat_image_features(image, features, max_features=3):
         feature = feature.view(1, h, w) #(3, h, w) input of size 3072
         # torch.Size([3, 32, 32])->[1, 32, 32]
 
-        print("img_feature & feature size:\n", image_feature.size(),"\n", feature.size())
+        #print("img_feature & feature size:\n", image_feature.size(),"\n", feature.size())
         # img_feature & feature size:
         # torch.Size([1, 32, 32]) -> [1, 32, 64]
         # torch.Size([3, 32, 32] ->[1, 32, 32]
@@ -395,7 +395,6 @@ def get_valid_transform(args, model):
 
 def train_step(args, model, optimizer, scheduler, criterion, batch, step, writer, device=None):
     model.train()
-    #print('\nBatch\n', batch)
     images, target = batch
 
     if device:
@@ -476,7 +475,7 @@ def validate(args, model, criterion, valid_loader, step, writer, device=None):
 
     return acc1, acc5, loss, infer_t
 
-
+#_acc1, _acc5 = accuracy(output, target, topk=(1, 5))
def accuracy(output, target, topk=(1,)):
     """Computes the accuracy over the k top predictions for the specified values of k"""
     with torch.no_grad():
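The last hunk only shows the signature and docstring of accuracy; its body is unchanged and not displayed here. For reference, a common top-k accuracy implementation with that signature looks roughly like the sketch below, returning fractions in [0, 1] to match the *100 formatting used in the eval prints; this is an assumption, not necessarily the code in this repository:

import torch

def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # Indices of the top-k scores per sample: (batch, maxk) transposed to (maxk, batch).
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # Fraction of samples whose target appears in the top-k predictions.
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(1.0 / batch_size))
        return res

# Illustrative usage:
logits = torch.randn(8, 10)          # fake scores for 8 samples over 10 classes
labels = torch.randint(0, 10, (8,))  # fake ground-truth labels
top1, top5 = accuracy(logits, labels, topk=(1, 5))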