윤영빈

Consultation report, model parameters adjusted

@@ -407,7 +407,7 @@ class NetVLADModelLF(models.BaseModel):
     random_frames = True
     cluster_size = 64
     hidden1_size = 1024
-    relu = False
+    relu = True
     dimred = -1
     gating = True
     remove_diag = False
...
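The relu flag flipped in this hunk appears to gate an extra non-linearity on the hidden projection of the NetVLAD descriptor. As an illustration only (the function and tensor names below are assumptions, not taken from this repository; willow-style NetVLAD code typically applies tf.nn.relu6 after batch normalization), the effect of switching relu to True looks roughly like this:

import math
import tensorflow as tf  # TF 1.x API, matching the flags-based scripts in this commit

def hidden_projection(activation, hidden1_size=1024, relu=True, is_training=True):
    """Illustrative hidden layer whose behaviour the `relu` hyperparameter toggles.

    `activation` stands in for the flattened NetVLAD descriptor; the names here
    are hypothetical and only sketch the assumed control flow.
    """
    input_dim = activation.get_shape().as_list()[1]
    hidden_weights = tf.get_variable(
        "hidden1_weights", [input_dim, hidden1_size],
        initializer=tf.random_normal_initializer(stddev=1.0 / math.sqrt(hidden1_size)))
    activation = tf.matmul(activation, hidden_weights)
    activation = tf.layers.batch_normalization(activation, training=is_training)
    if relu:
        # relu = True clips the batch-normalized projection into [0, 6];
        # relu = False (the previous setting) passes the linear output through unchanged.
        activation = tf.nn.relu6(activation)
    return activation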
@@ -75,7 +75,7 @@ if __name__ == "__main__":
   flags.DEFINE_integer(
       "num_gpu", 1, "The maximum number of GPU devices to use for training. "
       "Flag only applies if GPUs are installed")
-  flags.DEFINE_integer("batch_size", 256,
+  flags.DEFINE_integer("batch_size", 128,
                        "How many examples to process per batch for training.")
   flags.DEFINE_string("label_loss", "CrossEntropyLoss",
                       "Which loss function to use for training the model.")
@@ -83,24 +83,24 @@ if __name__ == "__main__":
       "regularization_penalty", 1.0,
       "How much weight to give to the regularization loss (the label loss has "
       "a weight of 1).")
-  flags.DEFINE_float("base_learning_rate", 0.01,
+  flags.DEFINE_float("base_learning_rate", 0.0006,
                      "Which learning rate to start with.")
   flags.DEFINE_float(
-      "learning_rate_decay", 0.95,
+      "learning_rate_decay", 0.8,
       "Learning rate decay factor to be applied every "
       "learning_rate_decay_examples.")
   flags.DEFINE_float(
-      "learning_rate_decay_examples", 4000000,
+      "learning_rate_decay_examples", 100,
       "Multiply current learning rate by learning_rate_decay "
      "every learning_rate_decay_examples.")
   flags.DEFINE_integer(
-      "num_epochs", 100, "How many passes to make over the dataset before "
+      "num_epochs", 5, "How many passes to make over the dataset before "
       "halting training.")
   flags.DEFINE_integer(
       "max_steps", None,
       "The maximum number of iterations of the training loop.")
   flags.DEFINE_integer(
-      "export_model_steps", 1,
+      "export_model_steps", 100,
       "The period, in number of steps, with which the model "
       "is exported for batch prediction.")
 
...
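The retuned training flags mostly reshape the learning-rate schedule. Assuming they feed the standard YouTube-8M starter-code schedule (tf.train.exponential_decay keyed on examples processed, i.e. global_step * batch_size, with a single GPU/tower taken here for simplicity), the new values imply roughly the following decay; this is a sketch under those assumptions, not a statement about this repository's exact training loop:

import tensorflow as tf  # TF 1.x

# New flag values from the diff above.
base_learning_rate = 0.0006
learning_rate_decay = 0.8
learning_rate_decay_examples = 100
batch_size = 128

global_step = tf.train.get_or_create_global_step()
# Assumed schedule: lr = base * decay ** floor(examples_seen / decay_examples).
learning_rate = tf.train.exponential_decay(
    base_learning_rate,
    global_step * batch_size,          # examples processed so far (one tower assumed)
    learning_rate_decay_examples,
    learning_rate_decay,
    staircase=True)

# Under these assumptions one batch (128 examples) already exceeds the 100-example
# decay interval, so the rate drops by a factor of ~0.8 nearly every step:
# step 1  -> 0.0006 * 0.8**1  = 4.8e-4
# step 10 -> 0.0006 * 0.8**12 ≈ 4.1e-5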