김성연

Add weekly report and refactor Flask web code

Showing 24 changed files with 224 additions and 2459 deletions
@@ -2,7 +2,7 @@ import sys
import os
from flask.helpers import url_for
from face_emotion_recognition import face_recognition, video2
from face_emotion_recognition import face_recognition, video4
from flask import Flask, render_template
from flask.globals import request
from werkzeug.utils import redirect, secure_filename
@@ -39,7 +39,7 @@ def index():
@app.route('/goTest', methods=('GET', 'POST')) # URL to access
def test():
if request.method == 'GET':
return render_template('test.html', face_imgs=find_face_imgs())
return render_template('test.html')
@app.route('/uploadFace', methods=('GET', 'POST'))
@@ -61,11 +61,19 @@ def delete_face(face_name):
return redirect(url_for('index'))
@app.route('/uploadVideo')
@app.route('/uploadVideo', methods=('GET', 'POST'))
def upload_video():
f = request.files.get('video')
f.save("./static/video/" + secure_filename(f.filename))
return 'video uploaded successfully'
if request.method == 'POST':
f = request.files.get('video')
f.save("./static/video/" + secure_filename(f.filename))
return redirect(url_for('test'))
@app.route('/faceEmotionRecognition')
def faceEmotionRecognition():
face_emotion_dict = video4.videoDetector(3, 'record0')
print(face_emotion_dict)
return render_template('result.html', face_emotion_dict=face_emotion_dict, face_imgs=find_face_imgs())
if __name__ == "__main__":
......
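The routes above pass face_imgs=find_face_imgs() into the templates, but that helper lies outside the shown hunks. A minimal sketch of what it plausibly does, assuming registered face images live under static/img/ and the templates only need a name per entry (the helper name comes from the diff; the body is an assumption):

import os

def find_face_imgs():
    # Hypothetical sketch: the real helper is not part of this diff.
    # Assumes one image file per registered face under static/img/.
    face_imgs = []
    for file_name in os.listdir('./static/img'):
        name, ext = os.path.splitext(file_name)
        if ext.lower() in ('.jpg', '.jpeg', '.png'):
            face_imgs.append({'name': name})
    return face_imgs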
This diff could not be displayed because it is too large.
node {
name: "data"
op: "Placeholder"
attr {
key: "dtype"
value {
type: DT_FLOAT
}
}
}
node {
name: "data_bn/FusedBatchNorm"
op: "FusedBatchNorm"
input: "data:0"
input: "data_bn/gamma"
input: "data_bn/beta"
input: "data_bn/mean"
input: "data_bn/std"
attr {
key: "epsilon"
value {
f: 1.00099996416e-05
}
}
}
node {
name: "data_scale/Mul"
op: "Mul"
input: "data_bn/FusedBatchNorm"
input: "data_scale/mul"
}
node {
name: "data_scale/BiasAdd"
op: "BiasAdd"
input: "data_scale/Mul"
input: "data_scale/add"
}
node {
name: "SpaceToBatchND/block_shape"
op: "Const"
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
}
int_val: 1
int_val: 1
}
}
}
}
node {
name: "SpaceToBatchND/paddings"
op: "Const"
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 2
}
}
int_val: 3
int_val: 3
int_val: 3
int_val: 3
}
}
}
}
node {
name: "Pad"
op: "SpaceToBatchND"
input: "data_scale/BiasAdd"
input: "SpaceToBatchND/block_shape"
input: "SpaceToBatchND/paddings"
}
node {
name: "conv1_h/Conv2D"
op: "Conv2D"
input: "Pad"
input: "conv1_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "VALID"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 2
i: 2
i: 1
}
}
}
}
node {
name: "conv1_h/BiasAdd"
op: "BiasAdd"
input: "conv1_h/Conv2D"
input: "conv1_h/bias"
}
node {
name: "BatchToSpaceND"
op: "BatchToSpaceND"
input: "conv1_h/BiasAdd"
}
node {
name: "conv1_bn_h/FusedBatchNorm"
op: "FusedBatchNorm"
input: "BatchToSpaceND"
input: "conv1_bn_h/gamma"
input: "conv1_bn_h/beta"
input: "conv1_bn_h/mean"
input: "conv1_bn_h/std"
attr {
key: "epsilon"
value {
f: 1.00099996416e-05
}
}
}
node {
name: "conv1_scale_h/Mul"
op: "Mul"
input: "conv1_bn_h/FusedBatchNorm"
input: "conv1_scale_h/mul"
}
node {
name: "conv1_scale_h/BiasAdd"
op: "BiasAdd"
input: "conv1_scale_h/Mul"
input: "conv1_scale_h/add"
}
node {
name: "Relu"
op: "Relu"
input: "conv1_scale_h/BiasAdd"
}
node {
name: "conv1_pool/MaxPool"
op: "MaxPool"
input: "Relu"
attr {
key: "ksize"
value {
list {
i: 1
i: 3
i: 3
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 2
i: 2
i: 1
}
}
}
}
node {
name: "layer_64_1_conv1_h/Conv2D"
op: "Conv2D"
input: "conv1_pool/MaxPool"
input: "layer_64_1_conv1_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "layer_64_1_bn2_h/FusedBatchNorm"
op: "BiasAdd"
input: "layer_64_1_conv1_h/Conv2D"
input: "layer_64_1_conv1_h/Conv2D_bn_offset"
}
node {
name: "layer_64_1_scale2_h/Mul"
op: "Mul"
input: "layer_64_1_bn2_h/FusedBatchNorm"
input: "layer_64_1_scale2_h/mul"
}
node {
name: "layer_64_1_scale2_h/BiasAdd"
op: "BiasAdd"
input: "layer_64_1_scale2_h/Mul"
input: "layer_64_1_scale2_h/add"
}
node {
name: "Relu_1"
op: "Relu"
input: "layer_64_1_scale2_h/BiasAdd"
}
node {
name: "layer_64_1_conv2_h/Conv2D"
op: "Conv2D"
input: "Relu_1"
input: "layer_64_1_conv2_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "add"
op: "Add"
input: "layer_64_1_conv2_h/Conv2D"
input: "conv1_pool/MaxPool"
}
node {
name: "layer_128_1_bn1_h/FusedBatchNorm"
op: "FusedBatchNorm"
input: "add"
input: "layer_128_1_bn1_h/gamma"
input: "layer_128_1_bn1_h/beta"
input: "layer_128_1_bn1_h/mean"
input: "layer_128_1_bn1_h/std"
attr {
key: "epsilon"
value {
f: 1.00099996416e-05
}
}
}
node {
name: "layer_128_1_scale1_h/Mul"
op: "Mul"
input: "layer_128_1_bn1_h/FusedBatchNorm"
input: "layer_128_1_scale1_h/mul"
}
node {
name: "layer_128_1_scale1_h/BiasAdd"
op: "BiasAdd"
input: "layer_128_1_scale1_h/Mul"
input: "layer_128_1_scale1_h/add"
}
node {
name: "Relu_2"
op: "Relu"
input: "layer_128_1_scale1_h/BiasAdd"
}
node {
name: "layer_128_1_conv_expand_h/Conv2D"
op: "Conv2D"
input: "Relu_2"
input: "layer_128_1_conv_expand_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 2
i: 2
i: 1
}
}
}
}
node {
name: "layer_128_1_conv1_h/Conv2D"
op: "Conv2D"
input: "Relu_2"
input: "layer_128_1_conv1_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 2
i: 2
i: 1
}
}
}
}
node {
name: "layer_128_1_bn2/FusedBatchNorm"
op: "BiasAdd"
input: "layer_128_1_conv1_h/Conv2D"
input: "layer_128_1_conv1_h/Conv2D_bn_offset"
}
node {
name: "layer_128_1_scale2/Mul"
op: "Mul"
input: "layer_128_1_bn2/FusedBatchNorm"
input: "layer_128_1_scale2/mul"
}
node {
name: "layer_128_1_scale2/BiasAdd"
op: "BiasAdd"
input: "layer_128_1_scale2/Mul"
input: "layer_128_1_scale2/add"
}
node {
name: "Relu_3"
op: "Relu"
input: "layer_128_1_scale2/BiasAdd"
}
node {
name: "layer_128_1_conv2/Conv2D"
op: "Conv2D"
input: "Relu_3"
input: "layer_128_1_conv2/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "add_1"
op: "Add"
input: "layer_128_1_conv2/Conv2D"
input: "layer_128_1_conv_expand_h/Conv2D"
}
node {
name: "layer_256_1_bn1/FusedBatchNorm"
op: "FusedBatchNorm"
input: "add_1"
input: "layer_256_1_bn1/gamma"
input: "layer_256_1_bn1/beta"
input: "layer_256_1_bn1/mean"
input: "layer_256_1_bn1/std"
attr {
key: "epsilon"
value {
f: 1.00099996416e-05
}
}
}
node {
name: "layer_256_1_scale1/Mul"
op: "Mul"
input: "layer_256_1_bn1/FusedBatchNorm"
input: "layer_256_1_scale1/mul"
}
node {
name: "layer_256_1_scale1/BiasAdd"
op: "BiasAdd"
input: "layer_256_1_scale1/Mul"
input: "layer_256_1_scale1/add"
}
node {
name: "Relu_4"
op: "Relu"
input: "layer_256_1_scale1/BiasAdd"
}
node {
name: "SpaceToBatchND_1/paddings"
op: "Const"
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 2
}
dim {
size: 2
}
}
int_val: 1
int_val: 1
int_val: 1
int_val: 1
}
}
}
}
node {
name: "layer_256_1_conv_expand/Conv2D"
op: "Conv2D"
input: "Relu_4"
input: "layer_256_1_conv_expand/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 2
i: 2
i: 1
}
}
}
}
node {
name: "conv4_3_norm/l2_normalize"
op: "L2Normalize"
input: "Relu_4:0"
input: "conv4_3_norm/l2_normalize/Sum/reduction_indices"
}
node {
name: "conv4_3_norm/mul_1"
op: "Mul"
input: "conv4_3_norm/l2_normalize"
input: "conv4_3_norm/mul"
}
node {
name: "conv4_3_norm_mbox_loc/Conv2D"
op: "Conv2D"
input: "conv4_3_norm/mul_1"
input: "conv4_3_norm_mbox_loc/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv4_3_norm_mbox_loc/BiasAdd"
op: "BiasAdd"
input: "conv4_3_norm_mbox_loc/Conv2D"
input: "conv4_3_norm_mbox_loc/bias"
}
node {
name: "flatten/Reshape"
op: "Flatten"
input: "conv4_3_norm_mbox_loc/BiasAdd"
}
node {
name: "conv4_3_norm_mbox_conf/Conv2D"
op: "Conv2D"
input: "conv4_3_norm/mul_1"
input: "conv4_3_norm_mbox_conf/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv4_3_norm_mbox_conf/BiasAdd"
op: "BiasAdd"
input: "conv4_3_norm_mbox_conf/Conv2D"
input: "conv4_3_norm_mbox_conf/bias"
}
node {
name: "flatten_6/Reshape"
op: "Flatten"
input: "conv4_3_norm_mbox_conf/BiasAdd"
}
node {
name: "Pad_1"
op: "SpaceToBatchND"
input: "Relu_4"
input: "SpaceToBatchND/block_shape"
input: "SpaceToBatchND_1/paddings"
}
node {
name: "layer_256_1_conv1/Conv2D"
op: "Conv2D"
input: "Pad_1"
input: "layer_256_1_conv1/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "VALID"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 2
i: 2
i: 1
}
}
}
}
node {
name: "layer_256_1_bn2/FusedBatchNorm"
op: "BiasAdd"
input: "layer_256_1_conv1/Conv2D"
input: "layer_256_1_conv1/Conv2D_bn_offset"
}
node {
name: "BatchToSpaceND_1"
op: "BatchToSpaceND"
input: "layer_256_1_bn2/FusedBatchNorm"
}
node {
name: "layer_256_1_scale2/Mul"
op: "Mul"
input: "BatchToSpaceND_1"
input: "layer_256_1_scale2/mul"
}
node {
name: "layer_256_1_scale2/BiasAdd"
op: "BiasAdd"
input: "layer_256_1_scale2/Mul"
input: "layer_256_1_scale2/add"
}
node {
name: "Relu_5"
op: "Relu"
input: "layer_256_1_scale2/BiasAdd"
}
node {
name: "layer_256_1_conv2/Conv2D"
op: "Conv2D"
input: "Relu_5"
input: "layer_256_1_conv2/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "add_2"
op: "Add"
input: "layer_256_1_conv2/Conv2D"
input: "layer_256_1_conv_expand/Conv2D"
}
node {
name: "layer_512_1_bn1/FusedBatchNorm"
op: "FusedBatchNorm"
input: "add_2"
input: "layer_512_1_bn1/gamma"
input: "layer_512_1_bn1/beta"
input: "layer_512_1_bn1/mean"
input: "layer_512_1_bn1/std"
attr {
key: "epsilon"
value {
f: 1.00099996416e-05
}
}
}
node {
name: "layer_512_1_scale1/Mul"
op: "Mul"
input: "layer_512_1_bn1/FusedBatchNorm"
input: "layer_512_1_scale1/mul"
}
node {
name: "layer_512_1_scale1/BiasAdd"
op: "BiasAdd"
input: "layer_512_1_scale1/Mul"
input: "layer_512_1_scale1/add"
}
node {
name: "Relu_6"
op: "Relu"
input: "layer_512_1_scale1/BiasAdd"
}
node {
name: "layer_512_1_conv_expand_h/Conv2D"
op: "Conv2D"
input: "Relu_6"
input: "layer_512_1_conv_expand_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "layer_512_1_conv1_h/Conv2D"
op: "Conv2D"
input: "Relu_6"
input: "layer_512_1_conv1_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "layer_512_1_bn2_h/FusedBatchNorm"
op: "BiasAdd"
input: "layer_512_1_conv1_h/Conv2D"
input: "layer_512_1_conv1_h/Conv2D_bn_offset"
}
node {
name: "layer_512_1_scale2_h/Mul"
op: "Mul"
input: "layer_512_1_bn2_h/FusedBatchNorm"
input: "layer_512_1_scale2_h/mul"
}
node {
name: "layer_512_1_scale2_h/BiasAdd"
op: "BiasAdd"
input: "layer_512_1_scale2_h/Mul"
input: "layer_512_1_scale2_h/add"
}
node {
name: "Relu_7"
op: "Relu"
input: "layer_512_1_scale2_h/BiasAdd"
}
node {
name: "layer_512_1_conv2_h/convolution/SpaceToBatchND"
op: "SpaceToBatchND"
input: "Relu_7"
input: "layer_512_1_conv2_h/convolution/SpaceToBatchND/block_shape"
input: "layer_512_1_conv2_h/convolution/SpaceToBatchND/paddings"
}
node {
name: "layer_512_1_conv2_h/convolution"
op: "Conv2D"
input: "layer_512_1_conv2_h/convolution/SpaceToBatchND"
input: "layer_512_1_conv2_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "VALID"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "layer_512_1_conv2_h/convolution/BatchToSpaceND"
op: "BatchToSpaceND"
input: "layer_512_1_conv2_h/convolution"
input: "layer_512_1_conv2_h/convolution/BatchToSpaceND/block_shape"
input: "layer_512_1_conv2_h/convolution/BatchToSpaceND/crops"
}
node {
name: "add_3"
op: "Add"
input: "layer_512_1_conv2_h/convolution/BatchToSpaceND"
input: "layer_512_1_conv_expand_h/Conv2D"
}
node {
name: "last_bn_h/FusedBatchNorm"
op: "FusedBatchNorm"
input: "add_3"
input: "last_bn_h/gamma"
input: "last_bn_h/beta"
input: "last_bn_h/mean"
input: "last_bn_h/std"
attr {
key: "epsilon"
value {
f: 1.00099996416e-05
}
}
}
node {
name: "last_scale_h/Mul"
op: "Mul"
input: "last_bn_h/FusedBatchNorm"
input: "last_scale_h/mul"
}
node {
name: "last_scale_h/BiasAdd"
op: "BiasAdd"
input: "last_scale_h/Mul"
input: "last_scale_h/add"
}
node {
name: "last_relu"
op: "Relu"
input: "last_scale_h/BiasAdd"
}
node {
name: "conv6_1_h/Conv2D"
op: "Conv2D"
input: "last_relu"
input: "conv6_1_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv6_1_h/BiasAdd"
op: "BiasAdd"
input: "conv6_1_h/Conv2D"
input: "conv6_1_h/bias"
}
node {
name: "conv6_1_h/Relu"
op: "Relu"
input: "conv6_1_h/BiasAdd"
}
node {
name: "conv6_2_h/Conv2D"
op: "Conv2D"
input: "conv6_1_h/Relu"
input: "conv6_2_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 2
i: 2
i: 1
}
}
}
}
node {
name: "conv6_2_h/BiasAdd"
op: "BiasAdd"
input: "conv6_2_h/Conv2D"
input: "conv6_2_h/bias"
}
node {
name: "conv6_2_h/Relu"
op: "Relu"
input: "conv6_2_h/BiasAdd"
}
node {
name: "conv7_1_h/Conv2D"
op: "Conv2D"
input: "conv6_2_h/Relu"
input: "conv7_1_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv7_1_h/BiasAdd"
op: "BiasAdd"
input: "conv7_1_h/Conv2D"
input: "conv7_1_h/bias"
}
node {
name: "conv7_1_h/Relu"
op: "Relu"
input: "conv7_1_h/BiasAdd"
}
node {
name: "Pad_2"
op: "SpaceToBatchND"
input: "conv7_1_h/Relu"
input: "SpaceToBatchND/block_shape"
input: "SpaceToBatchND_1/paddings"
}
node {
name: "conv7_2_h/Conv2D"
op: "Conv2D"
input: "Pad_2"
input: "conv7_2_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "VALID"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 2
i: 2
i: 1
}
}
}
}
node {
name: "conv7_2_h/BiasAdd"
op: "BiasAdd"
input: "conv7_2_h/Conv2D"
input: "conv7_2_h/bias"
}
node {
name: "BatchToSpaceND_2"
op: "BatchToSpaceND"
input: "conv7_2_h/BiasAdd"
}
node {
name: "conv7_2_h/Relu"
op: "Relu"
input: "BatchToSpaceND_2"
}
node {
name: "conv8_1_h/Conv2D"
op: "Conv2D"
input: "conv7_2_h/Relu"
input: "conv8_1_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv8_1_h/BiasAdd"
op: "BiasAdd"
input: "conv8_1_h/Conv2D"
input: "conv8_1_h/bias"
}
node {
name: "conv8_1_h/Relu"
op: "Relu"
input: "conv8_1_h/BiasAdd"
}
node {
name: "conv8_2_h/Conv2D"
op: "Conv2D"
input: "conv8_1_h/Relu"
input: "conv8_2_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv8_2_h/BiasAdd"
op: "BiasAdd"
input: "conv8_2_h/Conv2D"
input: "conv8_2_h/bias"
}
node {
name: "conv8_2_h/Relu"
op: "Relu"
input: "conv8_2_h/BiasAdd"
}
node {
name: "conv9_1_h/Conv2D"
op: "Conv2D"
input: "conv8_2_h/Relu"
input: "conv9_1_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv9_1_h/BiasAdd"
op: "BiasAdd"
input: "conv9_1_h/Conv2D"
input: "conv9_1_h/bias"
}
node {
name: "conv9_1_h/Relu"
op: "Relu"
input: "conv9_1_h/BiasAdd"
}
node {
name: "conv9_2_h/Conv2D"
op: "Conv2D"
input: "conv9_1_h/Relu"
input: "conv9_2_h/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv9_2_h/BiasAdd"
op: "BiasAdd"
input: "conv9_2_h/Conv2D"
input: "conv9_2_h/bias"
}
node {
name: "conv9_2_h/Relu"
op: "Relu"
input: "conv9_2_h/BiasAdd"
}
node {
name: "conv9_2_mbox_loc/Conv2D"
op: "Conv2D"
input: "conv9_2_h/Relu"
input: "conv9_2_mbox_loc/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv9_2_mbox_loc/BiasAdd"
op: "BiasAdd"
input: "conv9_2_mbox_loc/Conv2D"
input: "conv9_2_mbox_loc/bias"
}
node {
name: "flatten_5/Reshape"
op: "Flatten"
input: "conv9_2_mbox_loc/BiasAdd"
}
node {
name: "conv9_2_mbox_conf/Conv2D"
op: "Conv2D"
input: "conv9_2_h/Relu"
input: "conv9_2_mbox_conf/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv9_2_mbox_conf/BiasAdd"
op: "BiasAdd"
input: "conv9_2_mbox_conf/Conv2D"
input: "conv9_2_mbox_conf/bias"
}
node {
name: "flatten_11/Reshape"
op: "Flatten"
input: "conv9_2_mbox_conf/BiasAdd"
}
node {
name: "conv8_2_mbox_loc/Conv2D"
op: "Conv2D"
input: "conv8_2_h/Relu"
input: "conv8_2_mbox_loc/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv8_2_mbox_loc/BiasAdd"
op: "BiasAdd"
input: "conv8_2_mbox_loc/Conv2D"
input: "conv8_2_mbox_loc/bias"
}
node {
name: "flatten_4/Reshape"
op: "Flatten"
input: "conv8_2_mbox_loc/BiasAdd"
}
node {
name: "conv8_2_mbox_conf/Conv2D"
op: "Conv2D"
input: "conv8_2_h/Relu"
input: "conv8_2_mbox_conf/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv8_2_mbox_conf/BiasAdd"
op: "BiasAdd"
input: "conv8_2_mbox_conf/Conv2D"
input: "conv8_2_mbox_conf/bias"
}
node {
name: "flatten_10/Reshape"
op: "Flatten"
input: "conv8_2_mbox_conf/BiasAdd"
}
node {
name: "conv7_2_mbox_loc/Conv2D"
op: "Conv2D"
input: "conv7_2_h/Relu"
input: "conv7_2_mbox_loc/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv7_2_mbox_loc/BiasAdd"
op: "BiasAdd"
input: "conv7_2_mbox_loc/Conv2D"
input: "conv7_2_mbox_loc/bias"
}
node {
name: "flatten_3/Reshape"
op: "Flatten"
input: "conv7_2_mbox_loc/BiasAdd"
}
node {
name: "conv7_2_mbox_conf/Conv2D"
op: "Conv2D"
input: "conv7_2_h/Relu"
input: "conv7_2_mbox_conf/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv7_2_mbox_conf/BiasAdd"
op: "BiasAdd"
input: "conv7_2_mbox_conf/Conv2D"
input: "conv7_2_mbox_conf/bias"
}
node {
name: "flatten_9/Reshape"
op: "Flatten"
input: "conv7_2_mbox_conf/BiasAdd"
}
node {
name: "conv6_2_mbox_loc/Conv2D"
op: "Conv2D"
input: "conv6_2_h/Relu"
input: "conv6_2_mbox_loc/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv6_2_mbox_loc/BiasAdd"
op: "BiasAdd"
input: "conv6_2_mbox_loc/Conv2D"
input: "conv6_2_mbox_loc/bias"
}
node {
name: "flatten_2/Reshape"
op: "Flatten"
input: "conv6_2_mbox_loc/BiasAdd"
}
node {
name: "conv6_2_mbox_conf/Conv2D"
op: "Conv2D"
input: "conv6_2_h/Relu"
input: "conv6_2_mbox_conf/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "conv6_2_mbox_conf/BiasAdd"
op: "BiasAdd"
input: "conv6_2_mbox_conf/Conv2D"
input: "conv6_2_mbox_conf/bias"
}
node {
name: "flatten_8/Reshape"
op: "Flatten"
input: "conv6_2_mbox_conf/BiasAdd"
}
node {
name: "fc7_mbox_loc/Conv2D"
op: "Conv2D"
input: "last_relu"
input: "fc7_mbox_loc/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "fc7_mbox_loc/BiasAdd"
op: "BiasAdd"
input: "fc7_mbox_loc/Conv2D"
input: "fc7_mbox_loc/bias"
}
node {
name: "flatten_1/Reshape"
op: "Flatten"
input: "fc7_mbox_loc/BiasAdd"
}
node {
name: "mbox_loc"
op: "ConcatV2"
input: "flatten/Reshape"
input: "flatten_1/Reshape"
input: "flatten_2/Reshape"
input: "flatten_3/Reshape"
input: "flatten_4/Reshape"
input: "flatten_5/Reshape"
input: "mbox_loc/axis"
}
node {
name: "fc7_mbox_conf/Conv2D"
op: "Conv2D"
input: "last_relu"
input: "fc7_mbox_conf/weights"
attr {
key: "dilations"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
attr {
key: "padding"
value {
s: "SAME"
}
}
attr {
key: "strides"
value {
list {
i: 1
i: 1
i: 1
i: 1
}
}
}
}
node {
name: "fc7_mbox_conf/BiasAdd"
op: "BiasAdd"
input: "fc7_mbox_conf/Conv2D"
input: "fc7_mbox_conf/bias"
}
node {
name: "flatten_7/Reshape"
op: "Flatten"
input: "fc7_mbox_conf/BiasAdd"
}
node {
name: "mbox_conf"
op: "ConcatV2"
input: "flatten_6/Reshape"
input: "flatten_7/Reshape"
input: "flatten_8/Reshape"
input: "flatten_9/Reshape"
input: "flatten_10/Reshape"
input: "flatten_11/Reshape"
input: "mbox_conf/axis"
}
node {
name: "mbox_conf_reshape"
op: "Reshape"
input: "mbox_conf"
input: "reshape_before_softmax"
}
node {
name: "mbox_conf_softmax"
op: "Softmax"
input: "mbox_conf_reshape"
attr {
key: "axis"
value {
i: 2
}
}
}
node {
name: "mbox_conf_flatten"
op: "Flatten"
input: "mbox_conf_softmax"
}
node {
name: "PriorBox_0"
op: "PriorBox"
input: "conv4_3_norm/mul_1"
input: "data"
attr {
key: "aspect_ratio"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 1
}
}
float_val: 2.0
}
}
}
attr {
key: "clip"
value {
b: false
}
}
attr {
key: "flip"
value {
b: true
}
}
attr {
key: "max_size"
value {
i: 60
}
}
attr {
key: "min_size"
value {
i: 30
}
}
attr {
key: "offset"
value {
f: 0.5
}
}
attr {
key: "step"
value {
f: 8.0
}
}
attr {
key: "variance"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 4
}
}
float_val: 0.10000000149
float_val: 0.10000000149
float_val: 0.20000000298
float_val: 0.20000000298
}
}
}
}
node {
name: "PriorBox_1"
op: "PriorBox"
input: "last_relu"
input: "data"
attr {
key: "aspect_ratio"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 2
}
}
float_val: 2.0
float_val: 3.0
}
}
}
attr {
key: "clip"
value {
b: false
}
}
attr {
key: "flip"
value {
b: true
}
}
attr {
key: "max_size"
value {
i: 111
}
}
attr {
key: "min_size"
value {
i: 60
}
}
attr {
key: "offset"
value {
f: 0.5
}
}
attr {
key: "step"
value {
f: 16.0
}
}
attr {
key: "variance"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 4
}
}
float_val: 0.10000000149
float_val: 0.10000000149
float_val: 0.20000000298
float_val: 0.20000000298
}
}
}
}
node {
name: "PriorBox_2"
op: "PriorBox"
input: "conv6_2_h/Relu"
input: "data"
attr {
key: "aspect_ratio"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 2
}
}
float_val: 2.0
float_val: 3.0
}
}
}
attr {
key: "clip"
value {
b: false
}
}
attr {
key: "flip"
value {
b: true
}
}
attr {
key: "max_size"
value {
i: 162
}
}
attr {
key: "min_size"
value {
i: 111
}
}
attr {
key: "offset"
value {
f: 0.5
}
}
attr {
key: "step"
value {
f: 32.0
}
}
attr {
key: "variance"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 4
}
}
float_val: 0.10000000149
float_val: 0.10000000149
float_val: 0.20000000298
float_val: 0.20000000298
}
}
}
}
node {
name: "PriorBox_3"
op: "PriorBox"
input: "conv7_2_h/Relu"
input: "data"
attr {
key: "aspect_ratio"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 2
}
}
float_val: 2.0
float_val: 3.0
}
}
}
attr {
key: "clip"
value {
b: false
}
}
attr {
key: "flip"
value {
b: true
}
}
attr {
key: "max_size"
value {
i: 213
}
}
attr {
key: "min_size"
value {
i: 162
}
}
attr {
key: "offset"
value {
f: 0.5
}
}
attr {
key: "step"
value {
f: 64.0
}
}
attr {
key: "variance"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 4
}
}
float_val: 0.10000000149
float_val: 0.10000000149
float_val: 0.20000000298
float_val: 0.20000000298
}
}
}
}
node {
name: "PriorBox_4"
op: "PriorBox"
input: "conv8_2_h/Relu"
input: "data"
attr {
key: "aspect_ratio"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 1
}
}
float_val: 2.0
}
}
}
attr {
key: "clip"
value {
b: false
}
}
attr {
key: "flip"
value {
b: true
}
}
attr {
key: "max_size"
value {
i: 264
}
}
attr {
key: "min_size"
value {
i: 213
}
}
attr {
key: "offset"
value {
f: 0.5
}
}
attr {
key: "step"
value {
f: 100.0
}
}
attr {
key: "variance"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 4
}
}
float_val: 0.10000000149
float_val: 0.10000000149
float_val: 0.20000000298
float_val: 0.20000000298
}
}
}
}
node {
name: "PriorBox_5"
op: "PriorBox"
input: "conv9_2_h/Relu"
input: "data"
attr {
key: "aspect_ratio"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 1
}
}
float_val: 2.0
}
}
}
attr {
key: "clip"
value {
b: false
}
}
attr {
key: "flip"
value {
b: true
}
}
attr {
key: "max_size"
value {
i: 315
}
}
attr {
key: "min_size"
value {
i: 264
}
}
attr {
key: "offset"
value {
f: 0.5
}
}
attr {
key: "step"
value {
f: 300.0
}
}
attr {
key: "variance"
value {
tensor {
dtype: DT_FLOAT
tensor_shape {
dim {
size: 4
}
}
float_val: 0.10000000149
float_val: 0.10000000149
float_val: 0.20000000298
float_val: 0.20000000298
}
}
}
}
node {
name: "mbox_priorbox"
op: "ConcatV2"
input: "PriorBox_0"
input: "PriorBox_1"
input: "PriorBox_2"
input: "PriorBox_3"
input: "PriorBox_4"
input: "PriorBox_5"
input: "mbox_loc/axis"
}
node {
name: "detection_out"
op: "DetectionOutput"
input: "mbox_loc"
input: "mbox_conf_flatten"
input: "mbox_priorbox"
attr {
key: "background_label_id"
value {
i: 0
}
}
attr {
key: "code_type"
value {
s: "CENTER_SIZE"
}
}
attr {
key: "confidence_threshold"
value {
f: 0.00999999977648
}
}
attr {
key: "keep_top_k"
value {
i: 200
}
}
attr {
key: "nms_threshold"
value {
f: 0.449999988079
}
}
attr {
key: "num_classes"
value {
i: 2
}
}
attr {
key: "share_location"
value {
b: true
}
}
attr {
key: "top_k"
value {
i: 400
}
}
}
node {
name: "reshape_before_softmax"
op: "Const"
attr {
key: "value"
value {
tensor {
dtype: DT_INT32
tensor_shape {
dim {
size: 3
}
}
int_val: 0
int_val: -1
int_val: 2
}
}
}
}
library {
}
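The node listing above is the text GraphDef (.pbtxt) of the ResNet-10 SSD face detector that ships with OpenCV's DNN samples. If the project loads it through OpenCV rather than dlib, the usual pattern is sketched below; readNetFromTensorflow and blobFromImage are standard OpenCV calls, while the file paths and the paired .pb weights name are assumptions:

import cv2
import numpy as np

# Hedged sketch: load the detector described by the graph text above.
# The weights filename and paths are assumed; only the .pbtxt appears in this commit.
net = cv2.dnn.readNetFromTensorflow('models/opencv_face_detector_uint8.pb',
                                    'models/opencv_face_detector.pbtxt')

img = cv2.imread('static/img/sample.jpg')
h, w = img.shape[:2]
# The "data" placeholder expects a 300x300 BGR blob with the SSD mean values.
blob = cv2.dnn.blobFromImage(img, 1.0, (300, 300), (104.0, 177.0, 123.0))
net.setInput(blob)
detections = net.forward()  # (1, 1, N, 7) output of the DetectionOutput node

for i in range(detections.shape[2]):
    confidence = float(detections[0, 0, i, 2])
    if confidence > 0.5:
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        x1, y1, x2, y2 = box.astype(int)
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)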
@@ -23,13 +23,6 @@ import time
# model = load_model(
# 'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
def get_key(val):
for key, value in labels_dict_.items():
if(value == val):
return key
def convertMillis(millis):
seconds = (millis/1000) % 60
minutes = (millis/(1000*60)) % 60
@@ -51,7 +44,7 @@ def videoDetector(input_fps, video_name):
detector = dlib.get_frontal_face_detector()
# face & emotion detection time dict
descs = np.load('../static/img/descs.npy', allow_pickle=True)[()]
descs = np.load('static/img/descs.npy', allow_pickle=True)[()]
labels_dict_ = {0: 'angry', 1: 'fear', 2: 'happy',
3: 'neutral', 4: 'sad', 5: 'surprise'}
face_emotion_dict = {}
@@ -129,3 +122,5 @@ def videoDetector(input_fps, video_name):
for i in range(1, 5):
cv2.destroyAllWindows()
cv2.waitKey(1)
return face_emotion_dict
......
@@ -9,114 +9,99 @@ import pathlib
import time
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
from tensorflow.keras.models import load_model
from tensorflow.keras import regularizers
from tensorflow import keras
import time
start = time.time()
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor('./models/shape_predictor_68_face_landmarks.dat')
facerec = dlib.face_recognition_model_v1('./models/dlib_face_recognition_resnet_model_v1.dat')
model = load_model('../checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
def get_key(val):
for key, value in labels_dict_.items():
if(value == val):
return key
def convertMillis(millis):
seconds=(millis/1000)%60
minutes=(millis/(1000*60))%60
hours=(millis/(1000*60*60))%24
seconds = (millis/1000) % 60
minutes = (millis/(1000*60)) % 60
hours = (millis/(1000*60*60)) % 24
return seconds, int(minutes), int(hours)
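# e.g. convertMillis(3725000) returns (5.0, 2, 1), i.e. 1 hour 2 minutes 5 seconds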
def videoDetector(input_fps, video_name):
def videoDetector(second, video_name):
# face & emotion detection model load
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(
'face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
facerec = dlib.face_recognition_model_v1(
'face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
model = load_model(
'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
# face & emotion detection time dict
descs = np.load('./img/descs.npy', allow_pickle=True)[()]
labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
descs = np.load('static/img/descs.npy', allow_pickle=True)[()]
labels_dict_ = {0: 'angry', 1: 'fear', 2: 'happy',
3: 'neutral', 4: 'sad', 5: 'surprise'}
face_emotion_dict = {}
for name, saved_desc in descs.items():
face_emotion_dict[name] = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
face_emotion_dict[name] = {'angry': [], 'fear': [
], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
# load the video info
video_path = './data/' + video_name + '.mp4'
cap=cv2.VideoCapture(video_path)
video_path = 'static/video/' + video_name + '.mp4'
cap = cv2.VideoCapture(video_path)
# read the video dimensions (frame info)
frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_size = (frameWidth, frameHeight)
fps = cap.get((cv2.CAP_PROP_FPS))
print(fps)
fps = cap.get(cv2.CAP_PROP_FPS)
multiplier = fps * second
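# e.g. a 30 fps clip with second=3 gives multiplier = 90, so the frameId % multiplier check below keeps roughly one frame every 3 seconds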
_, img_bgr = cap.read() # (800, 1920, 3)
padding_size = 0
resized_width = 1920
video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
prev_time = 0
frameCount = 0
ret = 1
fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
while True:
retval, frameBGR = cap.read() # read the video one frame at a time
current_time = time.time() - prev_time
while ret:
frameId = int(round(cap.get(1))) # get the current frame number
ret, frameBGR = cap.read() # read the video one frame at a time
if(type(frameBGR) == type(None)):
pass
else:
frameBGR = cv2.resize(frameBGR, video_size)
frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
if (retval is True) and (current_time > 1.5) :
prev_time = time.time()
if (ret is True) and (frameId % multiplier < 1):
faces = detector(frame, 1)
for (i, face) in enumerate(faces):
shape = predictor(frame, face)
face_descriptor = facerec.compute_face_descriptor(frame, shape)
img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
imgarr = np.array(img).reshape(1, 224, 224, 3) /255
emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
for name, saved_desc in descs.items():
dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
if dist < last_found['dist']:
last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2)
cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC))
face_emotion_dict[last_found['name']][emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion))
cv2.imshow('frame', frameBGR)
key = cv2.waitKey(25)
if key == 27 :
break
try:
shape = predictor(frame, face)
face_descriptor = facerec.compute_face_descriptor(
frame, shape)
img = cv2.resize(frame[face.top():face.bottom(), face.left(
):face.right()], dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
imgarr = np.array(img).reshape(1, 224, 224, 3) / 255
emotion = labels_dict_[
model.predict(imgarr).argmax(axis=-1)[0]]
last_found = {'name': 'unknown',
'dist': 0.6, 'color': (0, 0, 255)}
for name, saved_desc in descs.items():
dist = np.linalg.norm(
[face_descriptor] - saved_desc, axis=1)
if dist < last_found['dist']:
last_found = {
'name': name, 'dist': dist, 'color': (255, 255, 255)}
cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(
face.right(), face.bottom()), color=last_found['color'], thickness=2)
cv2.putText(frameBGR, last_found['name'] + ',' + emotion, org=(face.left(), face.top(
)), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
con_sec, con_min, con_hour = convertMillis(
cap.get(cv2.CAP_PROP_POS_MSEC))
face_emotion_dict[last_found['name']][emotion].append(
"{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
print("{0}:{1}:{2} {3}".format(
con_hour, con_min, round(con_sec, 3), emotion))
except Exception as e:
print(str(e))
frameCount += 1
print(face_emotion_dict)
print("총 시간 : ", time.time() - start)
if cap.isOpened():
cap.release()
for i in range(1,5):
cv2.destroyAllWindows()
cv2.waitKey(1)
if __name__ == '__main__':
videoDetector(3, 'zoom_1')
\ No newline at end of file
return face_emotion_dict
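For reference, a hedged sketch of how the route in app.py consumes this function and what the returned dict looks like (the names depend on the faces registered in descs.npy; the timestamp strings are made-up examples):

from face_emotion_recognition import video4

# second=3: sample roughly one frame every three seconds of static/video/record0.mp4
face_emotion_dict = video4.videoDetector(3, 'record0')

# Expected shape, for example:
# {'alice': {'angry': [], 'fear': [], 'happy': ['0:0:12.345'],
#            'neutral': ['0:0:3.012'], 'sad': [], 'surprise': []}}
for name, emotions in face_emotion_dict.items():
    for emotion, timestamps in emotions.items():
        if timestamps:
            print(name, emotion, timestamps)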
......
<!doctype html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport"
content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
<meta http-equiv="X-UA-Compatible" content="ie=edge">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
<title>Flask Face Emotion Recognition App</title>
</head>
<body>
<div class="container" style="margin-top: 100px">
<h3>Face Emotion Recognition Platform</h3>
<hr>
<form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
<div class="form-group">
<label for="title" class="text-uppercase">Video Upload</label>
<input type="file" name="file">
<button type="submit" class="btn btn-outline-primary">Add</button>
</div>
</form>
<video autoplay width="320" height="240" controls>
<source src={{ url_for('static', filename="video/zoom_1.mp4") }} type="video/mp4">
</video>
<a href="/faceEmotinoRecognition" class="btn btn-outline-primary">얼굴 감정 인식 분석하기</a>
<table class="table">
<thead>
<tr>
<th scope="col ">name</th>
<th scope="col">happy</th>
<th scope="col">sad</th>
<th scope="col">fear</th>
<th scope="col">angry</th>
<th scope="col">neutral</th>
<th scope="col">surprise</th>
</tr>
</thead>
<tbody>
{% for face_img in face_imgs %}
<tr>
<td scope="row">{{ face_img.name }}</td>
{% if face_emotion_dict[face_img.name].happy %}
<td>
{% for time in face_emotion_dict[face_img.name].happy %}
<span>{{time}}</span>
{% endfor %}
</td>
{% else %}
<td> X </td>
{% endif %}
{% if face_emotion_dict[face_img.name].sad %}
<td>
{% for time in face_emotion_dict[face_img.name].sad %}
<span>{{time}}</span>
{% endfor %}
</td>
{% else %}
<td> X </td>
{% endif %}
{% if face_emotion_dict[face_img.name].fear %}
<td>
{% for time in face_emotion_dict[face_img.name].fear %}
<span>{{time}}</span>
{% endfor %}
</td>
{% else %}
<td> X </td>
{% endif %}
{% if face_emotion_dict[face_img.name].angry %}
<td>
{% for time in face_emotion_dict[face_img.name].angry %}
<span>{{time}}</span>
{% endfor %}
</td>
{% else %}
<td> X </td>
{% endif %}
{% if face_emotion_dict[face_img.name].neutral %}
<td>
{% for time in face_emotion_dict[face_img.name].neutral %}
<span>{{time}}</span>
{% endfor %}
</td>
{% else %}
<td> X </td>
{% endif %}
{% if face_emotion_dict[face_img.name].surprise %}
<td>
{% for time in face_emotion_dict[face_img.name].surprise %}
<span>{{time}}</span>
{% endfor %}
</td>
{% else %}
<td> X </td>
{% endif %}
</tr>
{% endfor %}
</tbody>
</table>
<hr/>
</div>
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js"
integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN"
crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js"
integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q"
crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js"
integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl"
crossorigin="anonymous"></script>
</body>
</html>
\ No newline at end of file
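result.html above indexes face_emotion_dict by each face_img.name and reads one timestamp list per emotion. A small sketch for rendering the template on its own, with made-up sample data and assuming the default templates/ folder layout:

from flask import Flask, render_template

app = Flask(__name__)

# Made-up sample context matching what result.html expects.
sample_faces = [{'name': 'alice'}]
sample_emotions = {'alice': {'angry': [], 'fear': [], 'happy': ['0:0:12.3'],
                             'neutral': ['0:0:3.0'], 'sad': [], 'surprise': []}}

with app.test_request_context():
    html = render_template('result.html',
                           face_imgs=sample_faces,
                           face_emotion_dict=sample_emotions)
    print(len(html))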
@@ -17,13 +17,21 @@
<h3>Face Emotion Recognition Platform</h3>
<hr>
<form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
<form action="http://localhost:5000/uploadVideo" method="POST" enctype="multipart/form-data">
<div class="form-group">
<label for="title" class="text-uppercase">Video Upload</label>
<input type="file" name="file">
<input type="file" name="video">
<button type="submit" class="btn btn-outline-primary">Add</button>
</div>
</form>
<video autoplay width="320" height="240" controls>
<source src={{ url_for('static', filename="video/zoom_1.mp4") }} type="video/mp4">
</video>
<a href="/faceEmotinoRecognition" class="btn btn-outline-primary">얼굴 감정 인식 분석하기</a>
<hr/>
</div>
......