김성연

Add weekly report and refactor Flask web code

Showing 24 changed files with 224 additions and 2459 deletions
@@ -2,7 +2,7 @@ import sys
 import os
 
 from flask.helpers import url_for
-from face_emotion_recognition import face_recognition, video2
+from face_emotion_recognition import face_recognition, video4
 from flask import Flask, render_template
 from flask.globals import request
 from werkzeug.utils import redirect, secure_filename
@@ -39,7 +39,7 @@ def index():
 @app.route('/goTest', methods=('GET', 'POST'))  # URL to access
 def test():
     if request.method == 'GET':
-        return render_template('test.html', face_imgs=find_face_imgs())
+        return render_template('test.html')
 
 
 @app.route('/uploadFace', methods=('GET', 'POST'))
@@ -61,11 +61,19 @@ def delete_face(face_name):
     return redirect(url_for('index'))
 
 
-@app.route('/uploadVideo')
+@app.route('/uploadVideo', methods=('GET', 'POST'))
 def upload_video():
-    f = request.files.get('video')
-    f.save("./static/video/" + secure_filename(f.filename))
-    return 'video uploaded successfully'
+    if request.method == 'POST':
+        f = request.files.get('video')
+        f.save("./static/video/" + secure_filename(f.filename))
+        return redirect(url_for('test'))
+
+
+@app.route('/faceEmotionRecognition')
+def faceEmotionRecognition():
+    face_emotion_dict = video4.videoDetector(3, 'record0')
+    print(face_emotion_dict)
+    return render_template('result.html', face_emotion_dict=face_emotion_dict, face_imgs=find_face_imgs())
 
 
 if __name__ == "__main__":
......
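Review note on the `upload_video` change: `request.files.get('video')` returns `None` when the form posts without a file, so `f.save(...)` would raise `AttributeError`, and the `GET` branch falls through with no return value, which Flask rejects at runtime. A minimal hardened sketch of the same handler (it assumes the same app object and `test` endpoint shown in this diff; it is not the committed code):

```python
from flask import Flask, redirect, request, url_for
from werkzeug.utils import secure_filename

app = Flask(__name__)


@app.route('/uploadVideo', methods=('GET', 'POST'))
def upload_video():
    if request.method == 'POST':
        f = request.files.get('video')
        # guard against a missing or empty file field instead of crashing
        if f is not None and f.filename != '':
            f.save('./static/video/' + secure_filename(f.filename))
    # always return a response, including on GET
    return redirect(url_for('test'))
```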
This diff could not be displayed because it is too large.
This deleted file is collapsed here for readability. It was a ~2,360-line machine-generated TensorFlow text GraphDef (.pbtxt) describing an SSD-style face-detection network: a DT_FLOAT "data" placeholder feeding batch-norm/scale/ReLU convolution blocks (conv1 through conv9, layer_64 through layer_512, with SpaceToBatchND/BatchToSpaceND padding around strided convolutions), per-scale mbox_loc/mbox_conf heads that are flattened and concatenated, six PriorBox layers (min_size 30–264, max_size 60–315, step 8–300), and a final "detection_out" DetectionOutput node (num_classes: 2, nms_threshold 0.45, keep_top_k 200) — the text-graph format consumed by OpenCV's DNN module.
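The deleted graph above is the kind of .pbtxt that OpenCV's DNN module takes as the *config* argument alongside a frozen-weights .pb file. A hedged sketch of how such a graph is typically loaded and run; the file names and the 300x300/mean-subtraction preprocessing are common conventions for this SSD face detector, not values taken from this commit:

```python
import cv2

# hypothetical file names; the commit only shows that the .pbtxt graph was removed
net = cv2.dnn.readNetFromTensorflow('opencv_face_detector_uint8.pb',
                                    'opencv_face_detector.pbtxt')

img = cv2.imread('frame.jpg')
# the graph's "data" placeholder receives an image blob; SSD face models
# are commonly fed 300x300 crops with per-channel mean subtraction
blob = cv2.dnn.blobFromImage(img, 1.0, (300, 300), (104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()  # output of the "detection_out" DetectionOutput node
```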
@@ -23,13 +23,6 @@ import time
 # model = load_model(
 #     'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
 
-
-def get_key(val):
-    for key, value in labels_dict_.items():
-        if(value == val):
-            return key
-
-
 def convertMillis(millis):
     seconds = (millis/1000) % 60
     minutes = (millis/(1000*60)) % 60
@@ -51,7 +44,7 @@ def videoDetector(input_fps, video_name):
     detector = dlib.get_frontal_face_detector()
 
     # face & emotion detection time dict
-    descs = np.load('../static/img/descs.npy', allow_pickle=True)[()]
+    descs = np.load('static/img/descs.npy', allow_pickle=True)[()]
     labels_dict_ = {0: 'angry', 1: 'fear', 2: 'happy',
                     3: 'neutral', 4: 'sad', 5: 'surprise'}
     face_emotion_dict = {}
@@ -129,3 +122,5 @@ def videoDetector(input_fps, video_name):
     for i in range(1, 5):
         cv2.destroyAllWindows()
         cv2.waitKey(1)
+
+    return face_emotion_dict
......
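Note on the path fix above: 'static/img/descs.npy' is resolved against the *current working directory*, so this module now assumes it is imported by the Flask app launched from the project root; running the script directly from its own folder would fail again. A cwd-independent sketch, assuming the layout implied by the paths in this diff (module under face_emotion_recognition/, data under static/img/):

```python
import pathlib

import numpy as np

# resolve against this module's location rather than the process cwd
# (assumed layout: <project root>/face_emotion_recognition/<this module>.py
#  and <project root>/static/img/descs.npy)
ROOT = pathlib.Path(__file__).resolve().parents[1]
descs = np.load(ROOT / 'static' / 'img' / 'descs.npy', allow_pickle=True)[()]
```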
@@ -9,114 +9,99 @@ import pathlib
 import time
 import pandas as pd
 import tensorflow as tf
-from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
+from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
 from tensorflow.keras.models import load_model
 from tensorflow.keras import regularizers
 from tensorflow import keras
 import time
 
 
-start = time.time()
-detector = dlib.get_frontal_face_detector()
-predictor = dlib.shape_predictor('./models/shape_predictor_68_face_landmarks.dat')
-facerec = dlib.face_recognition_model_v1('./models/dlib_face_recognition_resnet_model_v1.dat')
-model = load_model('../checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
-
-
-def get_key(val):
-    for key, value in labels_dict_.items():
-        if(value == val):
-            return key
-
-
 def convertMillis(millis):
-    seconds=(millis/1000)%60
-    minutes=(millis/(1000*60))%60
-    hours=(millis/(1000*60*60))%24
+    seconds = (millis/1000) % 60
+    minutes = (millis/(1000*60)) % 60
+    hours = (millis/(1000*60*60)) % 24
     return seconds, int(minutes), int(hours)
 
 
-def videoDetector(input_fps, video_name):
+def videoDetector(second, video_name):
+
+    # face & emotion detection model load
+    detector = dlib.get_frontal_face_detector()
+    predictor = dlib.shape_predictor(
+        'face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
+    facerec = dlib.face_recognition_model_v1(
+        'face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
+    model = load_model(
+        'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
 
     # face & emotion detection time dict
-    descs = np.load('./img/descs.npy', allow_pickle=True)[()]
-    labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
+    descs = np.load('static/img/descs.npy', allow_pickle=True)[()]
+    labels_dict_ = {0: 'angry', 1: 'fear', 2: 'happy',
+                    3: 'neutral', 4: 'sad', 5: 'surprise'}
     face_emotion_dict = {}
     for name, saved_desc in descs.items():
-        face_emotion_dict[name] = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
-
+        face_emotion_dict[name] = {'angry': [], 'fear': [
+        ], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
 
     # load video info
-    video_path = './data/' + video_name + '.mp4'
-    cap=cv2.VideoCapture(video_path)
+    video_path = 'static/video/' + video_name + '.mp4'
+    cap = cv2.VideoCapture(video_path)
 
     # read the video size (frame info)
-    frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-    frame_size = (frameWidth, frameHeight)
-    fps = cap.get((cv2.CAP_PROP_FPS))
-    print(fps)
-
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    multiplier = fps * second
 
-    _, img_bgr = cap.read()  # (800, 1920, 3)
-    padding_size = 0
-    resized_width = 1920
-    video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
-    timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
-    prev_time = 0
+    frameCount = 0
+    ret = 1
 
-    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
-    while True:
-        retval, frameBGR = cap.read()  # read the video one frame at a time
-        current_time = time.time() - prev_time
+    while ret:
+        frameId = int(round(cap.get(1)))  # get the current frame number
+        ret, frameBGR = cap.read()  # read the video one frame at a time
 
         if(type(frameBGR) == type(None)):
             pass
         else:
-            frameBGR = cv2.resize(frameBGR, video_size)
             frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
 
-            if (retval is True) and (current_time > 1.5) :
-                prev_time = time.time()
+            if (ret is True) and (frameId % multiplier < 1):
                 faces = detector(frame, 1)
 
                 for (i, face) in enumerate(faces):
-                    shape = predictor(frame, face)
-                    face_descriptor = facerec.compute_face_descriptor(frame, shape)
-
-                    img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
-                    imgarr = np.array(img).reshape(1, 224, 224, 3) /255
-                    emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
-
-                    last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
-
-                    for name, saved_desc in descs.items():
-                        dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
-                        if dist < last_found['dist']:
-                            last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
-
-                    cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2)
-                    cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
-
-                    con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC))
-                    face_emotion_dict[last_found['name']][emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
-                    print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion))
-
-            cv2.imshow('frame', frameBGR)
-
-            key = cv2.waitKey(25)
-            if key == 27 :
-                break
-
+                    try:
+                        shape = predictor(frame, face)
+                        face_descriptor = facerec.compute_face_descriptor(
+                            frame, shape)
+
+                        img = cv2.resize(frame[face.top():face.bottom(), face.left(
+                        ):face.right()], dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
+                        imgarr = np.array(img).reshape(1, 224, 224, 3) / 255
+                        emotion = labels_dict_[
+                            model.predict(imgarr).argmax(axis=-1)[0]]
+
+                        last_found = {'name': 'unknown',
+                                      'dist': 0.6, 'color': (0, 0, 255)}
+
+                        for name, saved_desc in descs.items():
+                            dist = np.linalg.norm(
+                                [face_descriptor] - saved_desc, axis=1)
+                            if dist < last_found['dist']:
+                                last_found = {
+                                    'name': name, 'dist': dist, 'color': (255, 255, 255)}
+
+                        cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(
+                            face.right(), face.bottom()), color=last_found['color'], thickness=2)
+                        cv2.putText(frameBGR, last_found['name'] + ',' + emotion, org=(face.left(), face.top(
+                        )), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
+
+                        con_sec, con_min, con_hour = convertMillis(
+                            cap.get(cv2.CAP_PROP_POS_MSEC))
+                        face_emotion_dict[last_found['name']][emotion].append(
+                            "{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
+                        print("{0}:{1}:{2} {3}".format(
+                            con_hour, con_min, round(con_sec, 3), emotion))
+                    except Exception as e:
+                        print(str(e))
+
+        frameCount += 1
     print(face_emotion_dict)
-    print("total time : ", time.time() - start)
-    if cap.isOpened():
-        cap.release()
-
-    for i in range(1,5):
-        cv2.destroyAllWindows()
-        cv2.waitKey(1)
-
-
-if __name__ == '__main__':
-    videoDetector(3, 'zoom_1')
\ No newline at end of file
+    return face_emotion_dict
......
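The key behavioural change in this refactor: the old loop sampled by wall-clock time (`current_time > 1.5`), so results depended on how fast inference ran; the new loop samples deterministically, processing a frame whenever `frameId % (fps * second) < 1`, i.e. roughly one frame per `second` seconds of video. (`cap.get(1)` is `cv2.CAP_PROP_POS_FRAMES`, the index of the frame about to be decoded.) For the timestamping, `convertMillis(3750000)` returns `(30.0, 2, 1)`, which the caller formats hour-first as `1:2:30.0`. A standalone sketch of just the sampling loop, extracted from the diff:

```python
import cv2


def sampled_frames(video_path, second=3):
    """Yield (frame_number, frame) roughly once every `second` seconds of video."""
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)  # e.g. 30.0
    multiplier = fps * second        # e.g. 90 frames between samples
    ret = True
    while ret:
        frame_id = int(round(cap.get(cv2.CAP_PROP_POS_FRAMES)))
        ret, frame = cap.read()
        if ret and frame_id % multiplier < 1:
            yield frame_id, frame
    cap.release()
```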
+<!doctype html>
+<html lang="en">
+
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport"
+        content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
+    <meta http-equiv="X-UA-Compatible" content="ie=edge">
+    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
+        integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
+    <title>Flask Face Emotion Recognition App</title>
+</head>
+
+<body>
+
+    <div class="container" style="margin-top: 100px">
+        <h3>Face Emotion Recognition Platform</h3>
+        <hr>
+
+        <form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
+            <div class="form-group">
+                <label for="title" class="text-uppercase">Video Upload</label>
+                <input type="file" name="file">
+                <button type="submit" class="btn btn-outline-primary">Add</button>
+            </div>
+        </form>
+
+        <video autoplay width="320" height="240" controls>
+            <source src={{ url_for('static', filename="video/zoom_1.mp4") }} type="video/mp4">
+        </video>
+
+        <a href="/faceEmotionRecognition" class="btn btn-outline-primary">Run face emotion recognition analysis</a>
+
+
+        <table class="table">
+            <thead>
+                <tr>
+                    <th scope="col">name</th>
+                    <th scope="col">happy</th>
+                    <th scope="col">sad</th>
+                    <th scope="col">fear</th>
+                    <th scope="col">angry</th>
+                    <th scope="col">neutral</th>
+                    <th scope="col">surprise</th>
+                </tr>
+            </thead>
+            <tbody>
+
+                {% for face_img in face_imgs %}
+                <tr>
+                    <td scope="row">{{ face_img.name }}</td>
+                    {% if face_emotion_dict[face_img.name].happy %}
+                    <td>
+                        {% for time in face_emotion_dict[face_img.name].happy %}
+                        <span>{{time}}</span>
+                        {% endfor %}
+                    </td>
+                    {% else %}
+                    <td> X </td>
+                    {% endif %}
+
+                    {% if face_emotion_dict[face_img.name].sad %}
+                    <td>
+                        {% for time in face_emotion_dict[face_img.name].sad %}
+                        <span>{{time}}</span>
+                        {% endfor %}
+                    </td>
+                    {% else %}
+                    <td> X </td>
+                    {% endif %}
+
+                    {% if face_emotion_dict[face_img.name].fear %}
+                    <td>
+                        {% for time in face_emotion_dict[face_img.name].fear %}
+                        <span>{{time}}</span>
+                        {% endfor %}
+                    </td>
+                    {% else %}
+                    <td> X </td>
+                    {% endif %}
+
+                    {% if face_emotion_dict[face_img.name].angry %}
+                    <td>
+                        {% for time in face_emotion_dict[face_img.name].angry %}
+                        <span>{{time}}</span>
+                        {% endfor %}
+                    </td>
+                    {% else %}
+                    <td> X </td>
+                    {% endif %}
+
+                    {% if face_emotion_dict[face_img.name].neutral %}
+                    <td>
+                        {% for time in face_emotion_dict[face_img.name].neutral %}
+                        <span>{{time}}</span>
+                        {% endfor %}
+                    </td>
+                    {% else %}
+                    <td> X </td>
+                    {% endif %}
+
+                    {% if face_emotion_dict[face_img.name].surprise %}
+                    <td>
+                        {% for time in face_emotion_dict[face_img.name].surprise %}
+                        <span>{{time}}</span>
+                        {% endfor %}
+                    </td>
+                    {% else %}
+                    <td> X </td>
+                    {% endif %}
+
+                </tr>
+                {% endfor %}
+            </tbody>
+        </table>
+        <hr/>
+    </div>
+
+
+    <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js"
+        integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN"
+        crossorigin="anonymous"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js"
+        integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q"
+        crossorigin="anonymous"></script>
+    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js"
+        integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl"
+        crossorigin="anonymous"></script>
+</body>
+
+</html>
\ No newline at end of file
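For reference, `result.html` reads `face_emotion_dict[face_img.name].happy` and so on; Jinja2 attribute access falls back to item lookup, so the template expects the plain dict-of-dicts that `videoDetector` returns, mapping each known face name to per-emotion lists of `hour:minute:second` timestamps. An illustrative value (the name and timestamps below are invented):

```python
# illustrative shape only; names and times are made up
face_emotion_dict = {
    'alice': {
        'angry': [], 'fear': [],
        'happy': ['0:0:12.345', '0:1:3.021'],  # times where 'happy' was detected
        'neutral': ['0:0:3.007'], 'sad': [], 'surprise': [],
    },
}
```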
@@ -17,13 +17,21 @@
         <h3>Face Emotion Recognition Platform</h3>
         <hr>
 
-        <form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
+        <form action="http://localhost:5000/uploadVideo" method="POST" enctype="multipart/form-data">
             <div class="form-group">
                 <label for="title" class="text-uppercase">Video Upload</label>
-                <input type="file" name="file">
+                <input type="file" name="video">
                 <button type="submit" class="btn btn-outline-primary">Add</button>
             </div>
         </form>
+
+        <video autoplay width="320" height="240" controls>
+            <source src={{ url_for('static', filename="video/zoom_1.mp4") }} type="video/mp4">
+        </video>
+
+        <a href="/faceEmotionRecognition" class="btn btn-outline-primary">Run face emotion recognition analysis</a>
+
+        <hr/>
     </div>
 
 
......
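With this change the upload form posts a `video` field to `/uploadVideo`, matching `request.files.get('video')` in the Flask app. A quick smoke test of the round trip without a browser — a sketch assuming the dev server is running on localhost:5000 (as hard-coded in the template) and that a local sample video exists:

```python
import requests

# hypothetical smoke test; 'sample.mp4' is a placeholder file name
with open('sample.mp4', 'rb') as fh:
    resp = requests.post('http://localhost:5000/uploadVideo',
                         files={'video': ('sample.mp4', fh, 'video/mp4')})

# requests follows the redirect, so the final URL should be the /goTest page
print(resp.status_code, resp.url)
```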