김성주

Add an ipynb version for Google Colab & fix more errors

@@ -24,10 +24,10 @@ anchor_path = data_path + 'yolo_anchors.txt' # The path of the anchor txt file.
 class_name_path = data_path + 'classes.txt' # The path of the class names.

 ### Training related numbers
-batch_size = 6
+batch_size = 10
 img_size = [416, 416] # Images will be resized to `img_size` and fed to the network, size format: [width, height]
 letterbox_resize = True # Whether to use the letterbox resize, i.e., keep the original aspect ratio in the resized image.
-total_epoches = 50
+total_epoches = 20
 train_evaluation_step = 10 # Evaluate on the training batch after some steps.
 val_evaluation_epoch = 2 # Evaluate on the whole validation dataset after some epochs. Set to None to evaluate every epoch.
 save_epoch = 5 # Save the model after some epochs.
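Since `letterbox_resize` keeps the original aspect ratio, a quick illustration of the idea may help. This is a minimal sketch, not the repo's implementation; `cv2` and the function name `letterbox` are my assumptions:

```python
import cv2
import numpy as np

def letterbox(img, new_w=416, new_h=416):
    # scale so the image fits inside (new_w, new_h) without distortion
    h, w = img.shape[:2]
    scale = min(new_w / w, new_h / h)
    rw, rh = int(w * scale), int(h * scale)
    resized = cv2.resize(img, (rw, rh))
    # pad the remainder with gray so the output is exactly new_w x new_h
    canvas = np.full((new_h, new_w, 3), 128, dtype=np.uint8)
    dw, dh = (new_w - rw) // 2, (new_h - rh) // 2
    canvas[dh:dh + rh, dw:dw + rw, :] = resized
    # scale and offsets are needed later to map predicted boxes back
    return canvas, scale, dw, dh
```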
@@ -73,7 +73,7 @@ use_label_smooth = True # Whether to use class label smoothing strategy.
 use_focal_loss = True # Whether to apply focal loss on the conf loss.
 use_mix_up = True # Whether to use the mix-up data augmentation strategy.
 use_warm_up = True # Whether to use the warm-up strategy to prevent gradient explosion.
-warm_up_epoch = 3 # Warm-up training epochs. Set to a larger value if the gradient explodes.
+warm_up_epoch = 2 # Warm-up training epochs. Set to a larger value if the gradient explodes.

 ### some constants in validation
 # nms
@@ -2,8 +2,13 @@ changes from https://github.com/wizyoung/YOLOv3_TensorFlow
 by Seongju Kim, kareus1@khu.ac.kr
 I have only tested in the Colab environment so far (2020.05.16),
 so let me know if there are any errors/problems in the Python code version
+(##last changed: 2020.05.16)
 1] changed TextLineDataset to TFRecordDataset (also changed data parsing in the data utils and eval utils)
 2] fixed the restore-does-not-exist problem in train/eval mode
 3] fixed the saver to save the parameters only when the save-optimizer option is true
 4] changed the parameter 'mode' to the bool value 'is_training' in the data util functions (the string value 'mode' is passed as a byte string, so the functions do not evaluate if-clauses as expected, e.g., 'train' != b'train'; see the sketch after the data utils diff below)
+5] wrote a TFRecord binary iterator, which runs without a tf session (see the sketch below; reference: https://github.com/pgmmpk/tfrecord )
+6] removed the logging/tensorboard summary code (I will add it back later if necessary)
\ No newline at end of file
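For change 5] above, the TFRecord wire format is simple enough to read without a session: each record is a little-endian uint64 payload length, a masked CRC32 of the length, the serialized `tf.train.Example`, and a masked CRC32 of the payload. A minimal sketch follows; the helper name `tfrecord_iterator` and the CRC-skipping shortcut are my assumptions, see the referenced pgmmpk/tfrecord repo for a CRC-checked version:

```python
import gzip
import struct

def tfrecord_iterator(path, compression='GZIP'):
    # record layout: uint64 length | crc32(length) | payload | crc32(payload)
    opener = gzip.open if compression == 'GZIP' else open
    with opener(path, 'rb') as f:
        while True:
            header = f.read(8)
            if len(header) < 8:
                break                      # clean end of file
            length, = struct.unpack('<Q', header)
            f.read(4)                      # skip the length CRC
            payload = f.read(length)       # serialized tf.train.Example
            f.read(4)                      # skip the payload CRC
            yield payload
```

Each yielded payload can then be handed to `tf.train.Example().ParseFromString(...)`, exactly as `_parse_tfrecord` does in the data utils diff below.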
@@ -9,22 +9,20 @@ import random
 PY_VERSION = sys.version_info[0]
 iter_cnt = 0

-FEATURE_DESCRIPTION = {
-    'index': tf.FixedLenFeature([], tf.int64),
-    'image': tf.FixedLenFeature([], tf.string),
-    'width': tf.FixedLenFeature([], tf.int64),
-    'height': tf.FixedLenFeature([], tf.int64),
-    'boxes': tf.VarLenFeature(tf.int64)
-}
+def _parse_tfrecord(data):
+    example = tf.train.Example()
+    example.ParseFromString(data)
+    features = example.features.feature
+    return features

 def parse_tfrecord(data):
     # tfrecord parser for TFRecordDataset (raw data)
-    features = tf.parse_single_example(data, FEATURE_DESCRIPTION)
-    index = int(features['index'])
-    encoded_image = np.frombuffer(features['image'], dtype = np.uint8)
-    width = int(features['width'])
-    height = int(features['height'])
-    boxes = features['boxes'].eval()
+    features = _parse_tfrecord(data)
+    index = features['index'].int64_list.value[0]
+    encoded_image = np.frombuffer(features['image'].bytes_list.value[0], dtype = np.uint8)
+    width = features['width'].int64_list.value[0]
+    height = features['height'].int64_list.value[0]
+    boxes = features['boxes'].int64_list.value

     assert len(boxes) % 5 == 0, 'Annotation error occurred in box array.'
     box_cnt = len(boxes) // 5
@@ -33,7 +31,7 @@ def parse_tfrecord(data):
     labels = []

     for i in range(box_cnt):
-        label, x_min, y_min, x_max, y_max = int(boxes[i * 5]), float(boxes[i * 5 + 1]), float(boxes[i * 5 + 2]), float(boxes[i * 5 + 3]) ## do we need to change int to float? is there a float rectangle sample?
+        label, x_min, y_min, x_max, y_max = int(boxes[i * 5]), float(boxes[i * 5 + 1]), float(boxes[i * 5 + 2]), float(boxes[i * 5 + 3]), float(boxes[i * 5 + 4]) ## do we need to change int to float? is there a float rectangle sample?
         aligned_boxes.append([x_min, y_min, x_max, y_max])
         labels.append(label)
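Change 4] from the notes above is easy to trip over, and it shows up in functions like `get_batch_data` here: `tf.py_func` delivers string tensors to Python 3 as `bytes`, so an equality test against a `str` silently fails. A minimal TF1 repro sketch (the function and variable names are illustrative):

```python
import tensorflow as tf

def is_train_mode(mode):
    # mode arrives as b'train' in Python 3, so this is always False
    return mode == 'train'

check = tf.py_func(is_train_mode, inp=[tf.constant('train')], Tout=tf.bool)
with tf.Session() as sess:
    print(sess.run(check))  # False; a bool is_training flag avoids this
```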
@@ -99,6 +99,8 @@ with tf.Session() as sess:
     sess.run([tf.global_variables_initializer()])

     if os.path.exists(args.restore_path):
         saver_to_restore.restore(sess, args.restore_path)
+    else:
+        raise ValueError('there is no model to evaluate. You should move/create the checkpoint file at the restore path.')

     print('\nStart evaluation...\n')
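One caveat with the new guard: a TF1 `restore_path` is usually a checkpoint *prefix* (`model.ckpt` expands to `model.ckpt.index`, `model.ckpt.data-...`), so `os.path.exists(args.restore_path)` can be False even when a valid checkpoint is present. A hedged alternative sketch, assuming the surrounding script's `args`, `sess`, and `saver_to_restore`:

```python
import os
import tensorflow as tf

# ask TF's own bookkeeping for the newest checkpoint in the directory,
# instead of testing the raw prefix path as a file
ckpt = tf.train.latest_checkpoint(os.path.dirname(args.restore_path))
if ckpt is not None:
    saver_to_restore.restore(sess, ckpt)
else:
    raise ValueError('there is no model to evaluate. You should move/create the checkpoint file at the restore path.')
```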
@@ -22,18 +22,18 @@ pred_scores_flag = tf.placeholder(tf.float32, [1, None, None])
 gpu_nms_op = gpu_nms(pred_boxes_flag, pred_scores_flag, args.class_num, args.nms_topk, args.score_threshold, args.nms_threshold)

 ### tf.data pipeline
-train_dataset = tf.data.TFRecordDataset(filenames=train_file, compression_type='GZIP')
-train_dataset = train_dataset.shuffle(train_img_cnt)
-train_dataset = train_dataset.batch(batch_size)
+train_dataset = tf.data.TFRecordDataset(filenames=args.train_file, compression_type='GZIP')
+train_dataset = train_dataset.shuffle(args.train_img_cnt)
+train_dataset = train_dataset.batch(args.batch_size)
 train_dataset = train_dataset.map(
     lambda x: tf.py_func(get_batch_data,
                          inp=[x, args.class_num, args.img_size, args.anchors, True, args.multi_scale_train, args.use_mix_up, args.letterbox_resize],
                          Tout=[tf.int64, tf.float32, tf.float32, tf.float32, tf.float32]),
     num_parallel_calls=args.num_threads
 )
-train_dataset = train_dataset.prefetch(prefetech_buffer)
+train_dataset = train_dataset.prefetch(args.prefetech_buffer)

-val_dataset = tf.data.TFRecordDataset(filenames=val_file, compression_type='GZIP')
+val_dataset = tf.data.TFRecordDataset(filenames=args.val_file, compression_type='GZIP')
 val_dataset = val_dataset.batch(1)
 val_dataset = val_dataset.map(
     lambda x: tf.py_func(get_batch_data,
@@ -41,7 +41,7 @@ val_dataset = val_dataset.map(
                          Tout=[tf.int64, tf.float32, tf.float32, tf.float32, tf.float32]),
     num_parallel_calls=args.num_threads
 )
-val_dataset.prefetch(prefetech_buffer)
+val_dataset = val_dataset.prefetch(args.prefetech_buffer)  # prefetch returns a new dataset, so reassign it or it is a no-op

 iterator = tf.data.Iterator.from_structure(train_dataset.output_types, train_dataset.output_shapes)
 train_init_op = iterator.make_initializer(train_dataset)
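For reference, the `from_structure` iterator is what lets training and validation share a single `get_next()`. A sketch of how it is typically driven, assuming `val_init_op` is created the same way as `train_init_op` above:

```python
val_init_op = iterator.make_initializer(val_dataset)
batch = iterator.get_next()  # the same tensors serve both datasets

with tf.Session() as sess:
    sess.run(train_init_op)  # point the iterator at the training TFRecords
    # ... run training steps until tf.errors.OutOfRangeError ...
    sess.run(val_init_op)    # re-point the same iterator at validation data
```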
@@ -71,13 +71,13 @@ saver_to_restore = tf.train.Saver(var_list=tf.contrib.framework.get_variables_to
 update_vars = tf.contrib.framework.get_variables_to_restore(include=update_part)

-global_step = tf.Variable(float(global_step), trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])
+global_step = tf.Variable(float(args.global_step), trainable=False, collections=[tf.GraphKeys.LOCAL_VARIABLES])

 if use_warm_up:
     learning_rate = tf.cond(tf.less(global_step, train_batch_num * warm_up_epoch),
                             lambda: learning_rate_init * global_step / (train_batch_num * warm_up_epoch),
-                            lambda: config_learning_rate(global_step - args.train_batch_num * args.warm_up_epoch))
+                            lambda: config_learning_rate(args, global_step - args.train_batch_num * args.warm_up_epoch))
 else:
-    learning_rate = config_learning_rate(global_step)
+    learning_rate = config_learning_rate(args, global_step)

 optimizer = config_optimizer(args.optimizer_name, learning_rate)
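The warm-up branch above ramps the learning rate linearly from near zero to `learning_rate_init` over the first `warm_up_epoch` epochs, then hands off to `config_learning_rate`. Plugging in illustrative numbers (not the repo's defaults):

```python
learning_rate_init, train_batch_num, warm_up_epoch = 1e-4, 1000, 2
warm_up_steps = train_batch_num * warm_up_epoch  # 2000 ramp steps

for step in (1, 500, 2000):
    if step < warm_up_steps:
        lr = learning_rate_init * step / warm_up_steps  # linear warm up
    else:
        lr = learning_rate_init  # from here config_learning_rate takes over
    print(step, lr)  # 1 -> 5e-08, 500 -> 2.5e-05, 2000 -> 0.0001
```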
@@ -105,7 +105,7 @@ with tf.Session() as sess:
     if os.path.exists(args.restore_path):
         saver_to_restore.restore(sess, args.restore_path)

-    print('\nStart training...\n')
+    print('\nStart training...: Total epochs =', args.total_epoches, '\n')

     best_mAP = -np.Inf
@@ -163,7 +163,7 @@ with tf.Session() as sess:
     # calc mAP
     rec_total, prec_total, ap_total = AverageMeter(), AverageMeter(), AverageMeter()
-    gt_dict = parse_gt_rec(args.val_file, args.img_size, args.letterbox_resize)
+    gt_dict = parse_gt_rec(args.val_file, 'GZIP', args.img_size, args.letterbox_resize)

     info = '======> Epoch: {}, global_step: {}, lr: {:.6g} <======\n'.format(epoch, __global_step, __lr)
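`AverageMeter` here is the usual running-average helper for aggregating per-class recall/precision/AP into the final mAP numbers. A minimal sketch of the pattern (the repo's actual class may differ in attribute names):

```python
class AverageMeter(object):
    # keeps a running sum and count so the average is cheap to read
    def __init__(self):
        self.sum, self.count = 0.0, 0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n

    @property
    def average(self):
        return self.sum / max(self.count, 1)
```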