Showing 14 changed files with 91 additions and 48 deletions.
@@ -18,8 +18,9 @@ import shlex
 import json
 # Create your views here.
 import sys
-sys.path.insert(0, "/khuhub/2015104192/web/backend/yt8m/esot3ria")
+sys.path.insert(0, "/mnt/e/khuhub/2015104192/web/backend/yt8m/esot3ria")
 import inference_pb
+import pb_util as pbutil

 def with_ffprobe(filename):

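The body of `with_ffprobe` falls outside this hunk. Given the `json` import above and the fact that `runTime` is later formatted with `%d`, it plausibly shells out to ffprobe and returns the duration in whole seconds; a minimal sketch under that assumption (not the committed code):

```python
import json
import subprocess

def with_ffprobe(filename):
    # Ask ffprobe for container metadata as JSON and return the duration
    # rounded to whole seconds. Sketch only; the real body is not shown.
    out = subprocess.check_output(
        ["ffprobe", "-v", "quiet", "-print_format", "json",
         "-show_format", filename])
    return round(float(json.loads(out)["format"]["duration"]))
```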
@@ -77,16 +78,16 @@ class VideoFileUploadView(APIView):
         file_serializer.save()
         # Print the video duration
         fileFullName = file_serializer.data['file_save_name']
-        fileProcessedName = fileFullName.replace("%3A",":")
+        print('FILE======================================================= ' + fileFullName)
+        fileProcessedName = fileFullName.replace("E%3A","")
+        print('FILE======================================================= ' + fileProcessedName)
         runTime = with_ffprobe(fileProcessedName)
-        print('RUNTIME ::::==== ' + str(runTime))
         print(threshold)
-        print('CURR DIR = ' + os.getcwd())
-        process = subprocess.Popen(['./runMediaPipe.sh %s %s' %(fileProcessedName,runTime,)], shell = True)
+        print('runtime = ' + str(runTime))
+        process = subprocess.Popen(["sh runMediaPipe.sh %s %d " %(fileProcessedName,runTime)], shell = True)
         process.wait()
-
-
-        result = inference_pb.inference_pb('E:/khuhub/2015104192/web/backend/yt8m/featuremaps/features(baby_samoyed).pb', threshold)
+        print("NOW DONE name = " + fileProcessedName + " / thresh = " + str(threshold))
+        result = inference_pb.inference_pb("/tmp/mediapipe/features.pb", threshold)

         return Response(result, status=status.HTTP_201_CREATED)
     else:
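One note on the block above: building the command line with `%` interpolation under `shell=True` breaks on filenames containing spaces or shell metacharacters, and lets a crafted upload name inject shell commands. A hedged sketch of the list-argument form (same script, same positional parameters; not part of this commit):

```python
import subprocess

# Arguments reach runMediaPipe.sh as $1/$2 exactly as before, but the
# filename is passed through untouched instead of via the shell.
process = subprocess.Popen(["sh", "runMediaPipe.sh", fileProcessedName, str(runTime)])
process.wait()
```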
This file is too large to display.
web/backend/cdTest.sh 0 → 100644
 #!/bin/bash
-cd ../../../../mediapipe
+echo "${PWD}"
+cd ../../mediapipe/mediapipe
 . venv/bin/activate
-echo "STEP1"
-bazel version && \
-alias bazel='bazel'
-
-echo "STEP2"
-
+echo "activated"
+/usr/local/bazel/3.4.0/lib/bazel/bin/bazel version && \
+alias bazel='/usr/local/bazel/3.4.0/lib/bazel/bin/bazel'
 python -m mediapipe.examples.desktop.youtube8m.generate_input_sequence_example \
   --path_to_input_video=/$1 \
   --clip_end_time_sec=$2
-echo "STEP3"
-GLOG_logtostderr=1 /mediapipe/examples/desktop/youtube8m/extract_yt8m_features \
+echo "Nicely done"
+GLOG_logtostderr=1 bazel-bin/mediapipe/examples/desktop/youtube8m/extract_yt8m_features \
   --calculator_graph_config_file=mediapipe/graphs/youtube8m/feature_extraction.pbtxt \
   --input_side_packets=input_sequence_example=/tmp/mediapipe/metadata.pb \
-  --output_side_packets=output_sequence_example=/tmp/mediapipe/features.pb
-echo "COMPLETED"
-
-sleep 5
\ No newline at end of file
+  --output_side_packets=output_sequence_example=/tmp/mediapipe/features.pb
\ No newline at end of file
web/backend/test.sh 0 → 100644
+#!/bin/bash
+
+cd ../../../mediapipe
+. venv/bin/activate
+
+/usr/local/bazel/3.4.0/lib/bazel/bin/bazel version && \
+alias bazel='/usr/local/bazel/3.4.0/lib/bazel/bin/bazel'
+
+python -m mediapipe.examples.desktop.youtube8m.generate_input_sequence_example \
+  --path_to_input_video=/$1 \
+  --clip_end_time_sec=$2
+
+GLOG_logtostderr=1 bazel-bin/mediapipe/examples/desktop/youtube8m/extract_yt8m_features \
+  --calculator_graph_config_file=mediapipe/graphs/youtube8m/feature_extraction.pbtxt \
+  --input_side_packets=input_sequence_example=/tmp/mediapipe/metadata.pb \
+  --output_side_packets=output_sequence_example=/tmp/mediapipe/features.pb
\ No newline at end of file
1 | +{"model": "FrameLevelLogisticModel", "feature_sizes": "1024,128", "feature_names": "rgb,audio", "frame_features": true, "label_loss": "CrossEntropyLoss"} | ||
... | \ No newline at end of file | ... | \ No newline at end of file |
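This single-line file matches the layout YT8M writes as `model_flags.json` next to a trained checkpoint (the file name is an inference; the viewer omits it). A short sketch of consuming it:

```python
import json

# Unpack the per-frame feature layout declared by the flags file.
# "model_flags.json" is an assumed name; the viewer does not show it.
with open("model_flags.json") as f:
    flags = json.load(f)

feature_names = flags["feature_names"].split(",")                    # ['rgb', 'audio']
feature_sizes = [int(s) for s in flags["feature_sizes"].split(",")]  # [1024, 128]
assert sum(feature_sizes) == 1152  # width of each concatenated frame vector
```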
@@ -9,12 +9,12 @@ import video_recommender as recommender
 import video_util as videoutil

 # Define file paths.
-MODEL_PATH = "E:/khuhub/2015104192/web/backend/yt8m/esot3ria/model/inference_model/segment_inference_model"
-VOCAB_PATH = "E:/khuhub/2015104192/web/backend/yt8m/vocabulary.csv"
-VIDEO_TAGS_PATH = "E:/khuhub/2015104192/web/backend/yt8m/esot3ria/kaggle_solution_40k.csv"
-TAG_VECTOR_MODEL_PATH = "E:/khuhub/2015104192/web/backend/yt8m/esot3ria/tag_vectors.model"
-VIDEO_VECTOR_MODEL_PATH = "E:/khuhub/2015104192/web/backend/yt8m/esot3ria/video_vectors.model"
-SEGMENT_LABEL_PATH = "E:/khuhub/2015104192/web/backend/yt8m/segment_label_ids.csv"
+MODEL_PATH = "/mnt/e/khuhub/2015104192/web/backend/yt8m/esot3ria/model/inference_model/segment_inference_model"
+VOCAB_PATH = "/mnt/e/khuhub/2015104192/web/backend/yt8m/vocabulary.csv"
+VIDEO_TAGS_PATH = "/mnt/e/khuhub/2015104192/web/backend/yt8m/esot3ria/kaggle_solution_40k.csv"
+TAG_VECTOR_MODEL_PATH = "/mnt/e/khuhub/2015104192/web/backend/yt8m/esot3ria/tag_vectors.model"
+VIDEO_VECTOR_MODEL_PATH = "/mnt/e/khuhub/2015104192/web/backend/yt8m/esot3ria/video_vectors.model"
+SEGMENT_LABEL_PATH = "/mnt/e/khuhub/2015104192/web/backend/yt8m/segment_label_ids.csv"

 # Define parameters.
 TAG_TOP_K = 5
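Every path above moves from a Windows drive (`E:/...`) to the corresponding WSL mount (`/mnt/e/...`). A hypothetical helper that captures the convention, should more paths need the same treatment:

```python
def to_wsl_path(win_path):
    # 'E:/khuhub/...' -> '/mnt/e/khuhub/...'. Hypothetical helper,
    # not part of the commit.
    drive, _, rest = win_path.partition(":/")
    return "/mnt/{}/{}".format(drive.lower(), rest)

assert to_wsl_path("E:/khuhub/2015104192/web") == "/mnt/e/khuhub/2015104192/web"
```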
@@ -89,11 +89,12 @@ def inference_pb(file_path, threshold):
     print("started step 1")
     VIDEO_TOP_K = int(threshold)
     inference_result = {}
+    print("STEP ==== 1")
     with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
-
+        print("STEP ==== 1.5 " + file_path)
         # 0. Import SequenceExample type target from pb.
         target_video = pbutil.convert_pb(file_path)
-
+        print("STEP ==== 2")
         # 1. Load video features from pb.
         video_id_batch_val = np.array([b'video'])
         n_frames = len(target_video.feature_lists.feature_list['rgb'].feature)

@@ -109,13 +110,13 @@ def inference_pb(file_path, threshold):
             video_batch_val[i] = np.concatenate([video_batch_rgb, video_batch_audio], axis=0)
         video_batch_val = np.array([video_batch_val])
         num_frames_batch_val = np.array([n_frames])
-
+        print("STEP ==== 3")
         # Restore checkpoint and meta-graph file.
         if not gfile.Exists(MODEL_PATH + ".meta"):
             raise IOError("Cannot find %s. Did you run eval.py?" % MODEL_PATH)
         meta_graph_location = MODEL_PATH + ".meta"
         logging.info("loading meta-graph: " + meta_graph_location)
-
+        print("STEP ==== 4")
         with tf.device("/cpu:0"):
             saver = tf.train.import_meta_graph(meta_graph_location, clear_devices=True)
         logging.info("restoring variables from " + MODEL_PATH)

@@ -123,7 +124,7 @@ def inference_pb(file_path, threshold):
         input_tensor = tf.get_collection("input_batch_raw")[0]
         num_frames_tensor = tf.get_collection("num_frames")[0]
         predictions_tensor = tf.get_collection("predictions")[0]
-
+        print("STEP ==== 5")
         # Workaround for num_epochs issue.
         def set_up_init_ops(variables):
             init_op_list = []

@@ -133,10 +134,10 @@ def inference_pb(file_path, threshold):
                     variables.remove(variable)
             init_op_list.append(tf.variables_initializer(variables))
             return init_op_list
-
+        print("STEP ==== 6")
         sess.run(
             set_up_init_ops(tf.get_collection_ref(tf.GraphKeys.LOCAL_VARIABLES)))
-
+        print("STEP ==== 7")
         whitelisted_cls_mask = np.zeros((predictions_tensor.get_shape()[-1],),
                                         dtype=np.float32)
         with tf.io.gfile.GFile(SEGMENT_LABEL_PATH) as fobj:
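The loop that fills `whitelisted_cls_mask` falls outside the hunk; in the upstream YT8M code it reads one class index per line and flips that position to 1, skipping the header. A sketch of that assumed shape:

```python
# Assumed fill of the whitelist mask: one class index per line in
# segment_label_ids.csv; the header row fails int() and is skipped.
with tf.io.gfile.GFile(SEGMENT_LABEL_PATH) as fobj:
    for line in fobj:
        try:
            whitelisted_cls_mask[int(line)] = 1.0
        except ValueError:
            continue
```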
@@ -221,5 +222,5 @@ def inference_pb(file_path, threshold):

 if __name__ == '__main__':
     filepath = "/tmp/mediapipe/features.pb"
-    result = inference_pb(filepath)
+    result = inference_pb(filepath, 5)
     print(result)
 import tensorflow as tf
 import numpy
+import pickle


 def _make_bytes(int_array):

@@ -43,17 +44,20 @@ features = {


 def parse_exmp(serial_exmp):
+    print("CAME IN SECOND======111111 : ")
     _, sequence_parsed = tf.io.parse_single_sequence_example(
         serialized=serial_exmp,
         context_features=contexts,
         sequence_features=features)
-
-    sequence_parsed = tf.contrib.learn.run_n(sequence_parsed)[0]
-
+    print("CAME IN SECOND=====222222 := ")
+    sequence_parsed = tf.contrib.learn.run_n(sequence_parsed, n=1, feed_dict=None)[0]
+    print("CAME IN SECOND=====3333333")
     audio = sequence_parsed['AUDIO/feature/floats'].values
+    print("CAME IN SECOND=====44444444")
     rgb = sequence_parsed['RGB/feature/floats'].values
-
-    # print(audio.values)
+    print("CAME IN SECOND=====5555555")
+    print("======AUDIO=======")
+    print(audio)
     # print(type(audio.values))

     # audio is 128 8bit, rgb is 1024 8bit for every second

@@ -112,9 +116,21 @@ def make_exmp(id, audio, rgb):


 def convert_pb(filename):
+    print("==============")
+    print("CAME IN! " + filename)
+    print("==============")
     sequence_example = open(filename, 'rb').read()
+    print("==============")
+    print("IM 1 ::: ")
+    print("==============")
     audio, rgb = parse_exmp(sequence_example)
+    print("==============")
+    print("IM 2")
+    print("==============")
     tmp_example = make_exmp('video', audio, rgb)
-
+    print("==============")
+    print("IM 3")
+    print("==============")
     decoded = tf.train.SequenceExample.FromString(tmp_example)
+    print("DECODE COMPLETE")
     return decoded
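For reference, `inference_pb.py` above consumes the decoded `SequenceExample` by counting its RGB frames; a minimal usage sketch of `convert_pb`:

```python
# Round-trip the extracted features and inspect the frame count,
# mirroring how inference_pb.py uses the result.
seq = convert_pb("/tmp/mediapipe/features.pb")
n_frames = len(seq.feature_lists.feature_list['rgb'].feature)
print("decoded %d frames" % n_frames)
```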
@@ -12,6 +12,8 @@ def getURL(vid_id):
     return youtube_url + response.text[10:-3]


+print(getURL('udtT'))
+
 def getVideoInfo(vid_id, video_tags_path, top_k):
     video_url = getURL(vid_id)

@@ -33,7 +33,7 @@ FLAGS = flags.FLAGS
 if __name__ == "__main__":
   # Dataset flags.
   flags.DEFINE_string(
-      "train_dir", "/tmp/yt8m_model/",
+      "train_dir", "F:/yt8mDataset/savedModel",
       "The directory to load the model files from. "
       "The tensorboard metrics files are also saved to this "
       "directory.")
@@ -85,7 +85,7 @@ if __name__ == "__main__":
   flags.DEFINE_integer("top_k", 5, "How many predictions to output per video.")

   # Other flags.
-  flags.DEFINE_integer("batch_size", 512,
+  flags.DEFINE_integer("batch_size", 32,
                        "How many examples to process per batch.")
   flags.DEFINE_integer("num_readers", 1,
                        "How many threads to use for reading input files.")

@@ -276,7 +276,7 @@ def inference(reader, train_dir, data_pattern, out_file_location, batch_size,
   #open vocab csv file and store to dictionary
   #=========================================
   voca_dict = {}
-  vocabs = open("./vocabulary.csv", 'r')
+  vocabs = open("./vocabulary.csv", 'r',encoding='UTF8')
   while True:
     line = vocabs.readline()
     if not line: break
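The `encoding='UTF8'` fix above matters because `vocabulary.csv` contains non-ASCII label names. An equivalent, more idiomatic reading with the `csv` module (sketch only; the exact column layout, index in column 0 and name in column 3, is an assumption here):

```python
import csv

# Build the same index -> label-name dictionary the loop above produces.
with open("./vocabulary.csv", encoding="utf-8", newline="") as f:
    reader = csv.reader(f)
    next(reader)  # skip the header row (assumed present)
    voca_dict = {row[0]: row[3] for row in reader}
```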
@@ -75,7 +75,7 @@ if __name__ == "__main__":
   flags.DEFINE_integer(
       "num_gpu", 1, "The maximum number of GPU devices to use for training. "
       "Flag only applies if GPUs are installed")
-  flags.DEFINE_integer("batch_size", 128,
+  flags.DEFINE_integer("batch_size", 64,
                        "How many examples to process per batch for training.")
   flags.DEFINE_string("label_loss", "CrossEntropyLoss",
                       "Which loss function to use for training the model.")

@@ -83,14 +83,14 @@ if __name__ == "__main__":
       "regularization_penalty", 1.0,
       "How much weight to give to the regularization loss (the label loss has "
       "a weight of 1).")
-  flags.DEFINE_float("base_learning_rate", 0.001,
+  flags.DEFINE_float("base_learning_rate", 0.002,
                      "Which learning rate to start with.")
   flags.DEFINE_float(
       "learning_rate_decay", 0.8,
       "Learning rate decay factor to be applied every "
       "learning_rate_decay_examples.")
   flags.DEFINE_float(
-      "learning_rate_decay_examples", 100,
+      "learning_rate_decay_examples", 70,
       "Multiply current learning rate by learning_rate_decay "
       "every learning_rate_decay_examples.")
   flags.DEFINE_integer(

@@ -100,7 +100,7 @@ if __name__ == "__main__":
       "max_steps", None,
       "The maximum number of iterations of the training loop.")
   flags.DEFINE_integer(
-      "export_model_steps", 100,
+      "export_model_steps", 5,
       "The period, in number of steps, with which the model "
       "is exported for batch prediction.")

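A quick worked example of the new schedule, assuming the standard exponential decay that these flag descriptions imply:

```python
# After the change: start at 0.002 and multiply by 0.8 every 70 examples.
base_learning_rate = 0.002
learning_rate_decay = 0.8
learning_rate_decay_examples = 70

def lr_after(examples_processed):
    return base_learning_rate * learning_rate_decay ** (
        examples_processed / learning_rate_decay_examples)

print(lr_after(700))  # 0.002 * 0.8**10 ≈ 0.000215
```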