Authored by 조현아 on 2020-04-26 00:15:24 +0900
Commit a5ce1660d3d53bce368d4a554286dc1f3586f168 (a5ce1660)
1 parent: c4e3768c

resolved classify acc err
Showing 3 changed files with 102 additions and 49 deletions:
code/classifier/eval.py
code/classifier/train.py
code/classifier/utils.py

code/classifier/eval.py
@@ -5,19 +5,19 @@ from pprint import pprint
 import torch
 import torch.nn as nn
-from torch.utils.tensorboard import SummaryWriter
+import torchvision.transforms as transforms
+#from torch.utils.tensorboard import SummaryWriter
 from utils import *

 # command
 # python eval.py --model_path='logs/April_16_00:26:10__resnet50__None/'

-def eval(model_path, num_data):
+def eval(model_path):
     print('\n[+] Parse arguments')
     kwargs_path = os.path.join(model_path, 'kwargs.json')
     kwargs = json.loads(open(kwargs_path).read())
     args, kwargs = parse_args(kwargs)
-    args.batch_size = num_data
     pprint(args)
     device = torch.device('cuda' if args.use_cuda else 'cpu')
@@ -35,23 +35,25 @@ def eval(model_path, num_data):
     model.load_state_dict(torch.load(weight_path))

     print('\n[+] Load dataset')
-    test_dataset = get_dataset(args, 'test')
-    test_loader = iter(get_dataloader(args, test_dataset))
+    transform = transforms.Compose([transforms.Resize([240,240]), transforms.ToTensor()])
+    test_dataset = get_dataset(args, transform, 'test')
+    test_loader = iter(get_dataloader(args, test_dataset))   ###

-    print('\n[+] Start testing')
-    writer = SummaryWriter(log_dir=model_path)
-    _test_res = validate(args, model, criterion, test_loader, step=0, writer=writer)
+    # print('\n[+] Start testing')
+    # writer = SummaryWriter(log_dir=model_path)
+    _test_res = validate(args, model, criterion, test_loader, step=0)

     print('\n[+] Valid results')
     print('  Acc@1 : {:.3f}%'.format(_test_res[0].data.cpu().numpy()[0]*100))
-    print('  Acc@5 : {:.3f}%'.format(_test_res[1].data.cpu().numpy()[0]*100))
-    print('  Acc_all : {:.3f}%'.format(_test_res[2].data.cpu().numpy()[0]*100))
-    print('  Loss : {:.3f}'.format(_test_res[3].data))
-    print('  Infer Time(per image) : {:.3f}ms'.format(_test_res[4]*1000/len(test_dataset)))
+    print('  Loss : {:.3f}'.format(_test_res[1].data))
+    print('  Infer Time(per image) : {:.3f}ms'.format(_test_res[2]*1000/len(test_dataset)))

-    writer.close()
+    # writer.close()

 if __name__ == '__main__':
     fire.Fire(eval)
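With the writer gone, validate() returns a shorter tuple; the updated prints index it as (acc1, loss, infer_time). A minimal sketch (not the repo's code; shapes inferred from the .data.cpu().numpy()[0] and .data accessors above) of the tuple these prints expect:

    # Sketch only: stand-in values with the shapes eval()'s prints assume.
    import torch

    _test_res = (
        torch.tensor([0.91]),  # acc1: 1-element tensor, hence .data.cpu().numpy()[0]
        torch.tensor(0.35),    # loss: scalar tensor
        1.8,                   # total inference time in seconds over the split
    )
    n_images = 100  # stand-in for len(test_dataset)
    print('  Acc@1 : {:.3f}%'.format(_test_res[0].data.cpu().numpy()[0] * 100))
    print('  Loss : {:.3f}'.format(_test_res[1].item()))
    print('  Infer Time(per image) : {:.3f}ms'.format(_test_res[2] * 1000 / n_images))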

code/classifier/train.py
@@ -7,7 +7,7 @@ from pprint import pprint
 import torch.nn as nn
 import torch.backends.cudnn as cudnn
-from torch.utils.tensorboard import SummaryWriter
+# from torch.utils.tensorboard import SummaryWriter
 from networks import *
 from utils import *
@@ -27,7 +27,7 @@ def train(**kwargs):
     log_dir = os.path.join('/content/drive/My Drive/CD2 Project/runs/classify/', model_name)
     os.makedirs(os.path.join(log_dir, 'model'))
     json.dump(kwargs, open(os.path.join(log_dir, 'kwargs.json'), 'w'))
-    writer = SummaryWriter(log_dir=log_dir)
+    # writer = SummaryWriter(log_dir=log_dir)

     if args.seed is not None:
         random.seed(args.seed)
@@ -45,8 +45,10 @@ def train(**kwargs):
     #writer.add_graph(model)

     print('\n[+] Load dataset')
-    train_dataset = get_dataset(args, 'train')
-    valid_dataset = get_dataset(args, 'val')
+    transform = get_train_transform(args, model, log_dir)
+    val_transform = get_valid_transform(args, model)
+    train_dataset = get_dataset(args, transform, 'train')
+    valid_dataset = get_dataset(args, val_transform, 'val')
     train_loader = iter(get_inf_dataloader(args, train_dataset))
     max_epoch = len(train_dataset) // args.batch_size
     best_acc = -1
@@ -62,16 +64,16 @@ def train(**kwargs):
     start_t = time.time()
     for step in range(args.start_step, args.max_step):
         batch = next(train_loader)
-        _train_res = train_step(args, model, optimizer, scheduler, criterion, batch, step, writer)
+        _train_res = train_step(args, model, optimizer, scheduler, criterion, batch, step)

         if step % args.print_step == 0:
             print('\n[+] Training step: {}/{}\tTraining epoch: {}/{}\tElapsed time: {:.2f}min\tLearning rate: {}'.format(
                 step, args.max_step, current_epoch, max_epoch, (time.time()-start_t)/60, optimizer.param_groups[0]['lr']))
-            writer.add_scalar('train/learning_rate', optimizer.param_groups[0]['lr'], global_step=step)
-            writer.add_scalar('train/acc1', _train_res[0], global_step=step)
-            writer.add_scalar('train/loss', _train_res[1], global_step=step)
-            writer.add_scalar('train/forward_time', _train_res[2], global_step=step)
-            writer.add_scalar('train/backward_time', _train_res[3], global_step=step)
+            # writer.add_scalar('train/learning_rate', optimizer.param_groups[0]['lr'], global_step=step)
+            # writer.add_scalar('train/acc1', _train_res[0], global_step=step)
+            # writer.add_scalar('train/loss', _train_res[1], global_step=step)
+            # writer.add_scalar('train/forward_time', _train_res[2], global_step=step)
+            # writer.add_scalar('train/backward_time', _train_res[3], global_step=step)
             print('  Acc@1 : {:.3f}%'.format(_train_res[0].data.cpu().numpy()[0]*100))
             print('  Loss : {}'.format(_train_res[1].data))
             print('  FW Time : {:.3f}ms'.format(_train_res[2]*1000))
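An alternative to commenting out every writer call would be a null-object writer that keeps the call sites intact; a sketch of that pattern (not what this commit does):

    # Sketch: a no-op stand-in so writer.add_scalar(...) lines could stay in place.
    class NullWriter:
        def add_scalar(self, *args, **kwargs):
            pass  # silently drop the metric

        def close(self):
            pass

    writer = NullWriter()  # swap back to SummaryWriter(log_dir=...) to re-enable logging
    writer.add_scalar('train/loss', 0.5, global_step=0)
    writer.close()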
@@ -80,10 +82,10 @@ def train(**kwargs):
         if step % args.val_step == args.val_step-1:
             # print("\nstep, args.val_step: ", step, args.val_step)
             valid_loader = iter(get_dataloader(args, valid_dataset))
-            _valid_res = validate(args, model, criterion, valid_loader, step, writer)
-            print('\n[+] Valid results')
-            writer.add_scalar('valid/acc1', _valid_res[0], global_step=step)
-            writer.add_scalar('valid/loss', _valid_res[1], global_step=step)
+            _valid_res = validate(args, model, criterion, valid_loader, step)
+            print('\n[+] (Valid results) Valid step: {}/{}'.format(step, args.max_step))
+            # writer.add_scalar('valid/acc1', _valid_res[0], global_step=step)
+            # writer.add_scalar('valid/loss', _valid_res[1], global_step=step)
             print('  Acc@1 : {:.3f}%'.format(_valid_res[0].data.cpu().numpy()[0]*100))
             print('  Loss : {}'.format(_valid_res[1].data))
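Note the trigger condition: with the new default val_step of 50 (see utils.py below), step % args.val_step == args.val_step - 1 fires at steps 49, 99, 149, and so on, i.e. once per val_step steps, at the end of each window. A one-line check (illustrative):

    val_step = 50
    print([s for s in range(200) if s % val_step == val_step - 1])  # [49, 99, 149, 199]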
@@ -92,7 +94,7 @@ def train(**kwargs):
                 torch.save(model.state_dict(), os.path.join(log_dir, "model", "model.pt"))
                 print('\n[+] Model saved')

-    writer.close()
+    # writer.close()

 if __name__ == '__main__':
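The save here pairs with the load in eval.py (model.load_state_dict(torch.load(weight_path))). A self-contained round trip of the same state_dict pattern (toy model; paths illustrative):

    # Sketch: state_dict round trip matching train.py's save and eval.py's load.
    import os
    import torch
    import torch.nn as nn

    model = nn.Linear(4, 2)
    os.makedirs('model', exist_ok=True)
    torch.save(model.state_dict(), os.path.join('model', 'model.pt'))

    restored = nn.Linear(4, 2)  # must be built with the same architecture
    restored.load_state_dict(torch.load(os.path.join('model', 'model.pt')))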

code/classifier/utils.py
@@ -23,6 +23,7 @@ from sklearn.model_selection import KFold
 from networks import basenet, grayResNet2

 DATASET_PATH = '/content/drive/My Drive/CD2 Project/'
+TRAIN_DATASET_PATH = '/content/drive/My Drive/CD2 Project/data/nonaug+Normal_train/'
 TRAIN_TARGET_PATH = '/content/drive/My Drive/CD2 Project/data/train_nonaug_classify_target.csv'
@@ -131,17 +132,17 @@ def parse_args(kwargs):
     kwargs['dataset'] = kwargs['dataset'] if 'dataset' in kwargs else 'BraTS'
     kwargs['network'] = kwargs['network'] if 'network' in kwargs else 'resnet50'
     kwargs['optimizer'] = kwargs['optimizer'] if 'optimizer' in kwargs else 'adam'
-    kwargs['learning_rate'] = kwargs['learning_rate'] if 'learning_rate' in kwargs else 0.001
+    kwargs['learning_rate'] = kwargs['learning_rate'] if 'learning_rate' in kwargs else 0.01
     kwargs['seed'] = kwargs['seed'] if 'seed' in kwargs else None
     kwargs['use_cuda'] = kwargs['use_cuda'] if 'use_cuda' in kwargs else True
     kwargs['use_cuda'] = kwargs['use_cuda'] and torch.cuda.is_available()
     kwargs['num_workers'] = kwargs['num_workers'] if 'num_workers' in kwargs else 4
-    kwargs['print_step'] = kwargs['print_step'] if 'print_step' in kwargs else 100
-    kwargs['val_step'] = kwargs['val_step'] if 'val_step' in kwargs else 100
+    kwargs['print_step'] = kwargs['print_step'] if 'print_step' in kwargs else 50
+    kwargs['val_step'] = kwargs['val_step'] if 'val_step' in kwargs else 50
     kwargs['scheduler'] = kwargs['scheduler'] if 'scheduler' in kwargs else 'exp'
-    kwargs['batch_size'] = kwargs['batch_size'] if 'batch_size' in kwargs else 32
+    kwargs['batch_size'] = kwargs['batch_size'] if 'batch_size' in kwargs else 16
     kwargs['start_step'] = kwargs['start_step'] if 'start_step' in kwargs else 0
-    kwargs['max_step'] = kwargs['max_step'] if 'max_step' in kwargs else 2500
+    kwargs['max_step'] = kwargs['max_step'] if 'max_step' in kwargs else 500
     kwargs['augment_path'] = kwargs['augment_path'] if 'augment_path' in kwargs else None

     # to named tuple
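As an aside, each kwargs['k'] = kwargs['k'] if 'k' in kwargs else default line is the hand-rolled form of dict.setdefault; the same defaulting, with this commit's new values, could be written as (illustrative only, not part of the commit):

    # Sketch: equivalent defaulting via dict.setdefault, using the new defaults.
    import torch

    kwargs = {'batch_size': 8}  # example caller override
    defaults = {
        'dataset': 'BraTS', 'network': 'resnet50', 'optimizer': 'adam',
        'learning_rate': 0.01, 'seed': None, 'use_cuda': True, 'num_workers': 4,
        'print_step': 50, 'val_step': 50, 'scheduler': 'exp', 'batch_size': 16,
        'start_step': 0, 'max_step': 500, 'augment_path': None,
    }
    for key, value in defaults.items():
        kwargs.setdefault(key, value)  # the caller's value wins if present
    kwargs['use_cuda'] = kwargs['use_cuda'] and torch.cuda.is_available()
    print(kwargs['batch_size'])  # 8: the override survives, not the default 16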
@@ -155,11 +156,10 @@ def select_model(args):
                    'resnet50': grayResNet2.resnet50(), 'resnet101': grayResNet2.resnet101(),
                    'resnet152': grayResNet2.resnet152()}

-    if args.network in resnet_dict:
-        backbone = resnet_dict[args.network]
-        model = basenet.BaseNet(backbone, args)
-    else:
-        Net = getattr(importlib.import_module('networks.{}'.format(args.network)), 'Net')
-        model = Net(args)
+    model = resnet_dict[args.network]

     # else: # 3 channels
     #     backbone = models.__dict__[args.network]()
     #     model = basenet.BaseNet(backbone, args)
     #print(model) # print model architecture
     return model
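select_model now returns the grayResNet2 backbone straight from the lookup table instead of wrapping it in basenet.BaseNet, and the importlib fallback is gone, so an unsupported args.network now raises KeyError rather than importing networks.<name>. The same dict-dispatch shape with torchvision constructors as stand-ins (grayResNet2 is the repo's grayscale variant, not shown here):

    # Sketch: dict dispatch with torchvision constructors standing in for
    # grayResNet2 (the repo builds instances up front; constructors here
    # avoid instantiating all three).
    import torchvision.models as models

    resnet_dict = {'resnet50': models.resnet50,
                   'resnet101': models.resnet101,
                   'resnet152': models.resnet152}

    network = 'resnet50'
    model = resnet_dict[network]()  # KeyError for any name outside the table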
@@ -187,16 +187,44 @@ def select_scheduler(args, optimizer):
     else:
         raise Exception('Unknown Scheduler')

+def get_train_transform(args, model, transform, log_dir=None):
+    if args.dataset == 'cifar10':
+        transform = transforms.Compose([
+            transforms.Pad(4),
+            transforms.RandomCrop(32),
+            transforms.RandomHorizontalFlip(),
+            transforms.ToTensor()
+        ])
+    else:
+        transform = transforms.Compose([
+            transforms.Resize([240,240]),
+            transforms.ToTensor()
+        ])
+    return transform
+
+def get_valid_transform(args, model):
+    if args.dataset == 'cifar10':
+        val_transform = transforms.Compose([
+            transforms.Resize(32),
+            transforms.ToTensor()
+        ])
+    else:
+        val_transform = transforms.Compose([
+            transforms.Resize([240,240]),
+            transforms.ToTensor()
+        ])
+    return val_transform
+
 class CustomDataset(Dataset):
-    def __init__(self, data_path, csv_path):
+    def __init__(self, data_path, csv_path, transform):
         self.path = data_path
         self.imgs = natsorted(os.listdir(data_path))
         self.len = len(self.imgs)
-        self.transform = transforms.Compose([transforms.Resize([240,240]), transforms.ToTensor()])
+        self.transform = transform

         df = pd.read_csv(csv_path)
         targets_list = []
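A quick check of what the non-cifar10 branch produces (illustrative, not from the repo): Resize([240, 240]) followed by ToTensor() yields a CxHxW float tensor scaled into [0, 1], so a grayscale input comes out as 1x240x240:

    # Sketch: verifying the transform's output shape on a stand-in image.
    from PIL import Image
    import torchvision.transforms as transforms

    transform = transforms.Compose([transforms.Resize([240, 240]),
                                    transforms.ToTensor()])
    img = Image.new('L', (512, 390))  # stand-in grayscale image (all zeros)
    x = transform(img)
    print(x.shape)                    # torch.Size([1, 240, 240])
    print(x.min().item(), x.max().item())  # 0.0 0.0 for this blank image; real pixels land in [0, 1]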
@@ -215,6 +243,7 @@ class CustomDataset(Dataset):
         targets = self.targets[idx]
         image = Image.open(img_loc)
         image = self.transform(image)
+        #print("\n idx, img, targets: ", idx, img_loc, targets)
         return image, targets
@@ -222,12 +251,32 @@ class CustomDataset(Dataset):
-def get_dataset(args, split='train'):
+def get_dataset(args, transform, split='train'):
     assert split in ['train', 'val', 'test']

-    if split in ['train']:
-        dataset = CustomDataset(TRAIN_DATASET_PATH, TRAIN_TARGET_PATH)
-    elif split in ['val']:
-        dataset = CustomDataset(VAL_DATASET_PATH, VAL_TARGET_PATH)
-    else: # test
-        dataset = CustomDataset(TEST_DATASET_PATH, TEST_TARGET_PATH)
+    if args.dataset == 'cifar10':
+        train = split in ['train', 'val', 'trainval']
+        dataset = torchvision.datasets.CIFAR10(DATASET_PATH, train=train, transform=transform, download=True)
+
+        if split in ['train', 'val']:
+            split_path = os.path.join(DATASET_PATH, 'cifar-10-batches-py', 'train_val_index.cp')
+
+            if not os.path.exists(split_path):
+                [train_index], [val_index] = split_dataset(args, dataset, k=1)
+                split_index = {'train': train_index, 'val': val_index}
+                cp.dump(split_index, open(split_path, 'wb'))
+
+            split_index = cp.load(open(split_path, 'rb'))
+            dataset = Subset(dataset, split_index[split])
+
+    else:
+        if split in ['train']:
+            dataset = CustomDataset(TRAIN_DATASET_PATH, TRAIN_TARGET_PATH, transform)
+        elif split in ['val']:
+            dataset = CustomDataset(VAL_DATASET_PATH, VAL_TARGET_PATH, transform)
+        else: # test
+            dataset = CustomDataset(TEST_DATASET_PATH, TEST_TARGET_PATH, transform)

     return dataset
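The underlying design change is transform injection: CustomDataset no longer hard-codes its preprocessing, so one dataset class serves train/val/test with different transforms. A self-contained toy version of the same pattern (not the repo's CustomDataset):

    # Sketch: transform injection in a minimal Dataset, mirroring this commit.
    import torch
    from torch.utils.data import Dataset

    class ToyImageDataset(Dataset):
        def __init__(self, images, targets, transform):
            self.images = images        # e.g. PIL images or tensors
            self.targets = targets
            self.transform = transform  # injected, not hard-coded in the class

        def __len__(self):
            return len(self.images)

        def __getitem__(self, idx):
            return self.transform(self.images[idx]), self.targets[idx]

    identity = lambda x: x
    ds = ToyImageDataset([torch.zeros(1, 4, 4)], [0], identity)
    print(ds[0][0].shape)  # torch.Size([1, 4, 4])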
@@ -261,7 +310,7 @@ def get_inf_dataloader(args, dataset):
-def train_step(args, model, optimizer, scheduler, criterion, batch, step, writer, device=None):
+def train_step(args, model, optimizer, scheduler, criterion, batch, step, device=None):
     model.train()
     images, target = batch
@@ -275,7 +324,7 @@ def train_step(args, model, optimizer, scheduler, criterion, batch, step, writer
     # compute output
     start_t = time.time()
-    output, first = model(images)
+    output = model(images)
     forward_t = time.time() - start_t
     loss = criterion(output, target)
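Dropping first from the unpacking matters because a plain ResNet forward returns a single logits tensor, and output, first = model(images) on a tensor iterates over the batch dimension: with a batch of two it silently binds per-sample rows instead of (logits, extra). Whether this is the accuracy error the commit message refers to is not stated, but a toy model shows the hazard:

    # Sketch (toy model): why `output, first = model(images)` is wrong for a
    # forward that returns one tensor.
    import torch
    import torch.nn as nn

    model = nn.Linear(8, 3)        # stand-in for a plain ResNet forward
    images = torch.randn(2, 8)

    output = model(images)         # new call shape: (2, 3) logits
    print(output.shape)            # torch.Size([2, 3])

    out, first = model(images)     # old call shape: unpacks along dim 0
    print(out.shape, first.shape)  # torch.Size([3]) torch.Size([3]) -- per-sample rows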
@@ -323,7 +372,7 @@ def accuracy(output, target, topk=(1,)):
         res.append(correct_k)
     return res

-def validate(args, model, criterion, valid_loader, step, writer, device=None):
+def validate(args, model, criterion, valid_loader, step, device=None):
     # switch to evaluate mode
     model.eval()
@@ -344,7 +393,7 @@ def validate(args, model, criterion, valid_loader, step, writer, device=None):
             target = target.cuda(non_blocking=True)

         # compute output
-        output, first = model(images)
+        output = model(images)
         loss = criterion(output, target)
         infer_t += time.time() - start_t