Showing 24 changed files with 216 additions and 89 deletions
@@ -2,7 +2,7 @@ import sys
 import os
 
 from flask.helpers import url_for
-from face_emotion_recognition import face_recognition, video2
+from face_emotion_recognition import face_recognition, video4
 from flask import Flask, render_template
 from flask.globals import request
 from werkzeug.utils import redirect, secure_filename
@@ -39,7 +39,7 @@ def index():
 @app.route('/goTest', methods=('GET', 'POST'))  # URL for this page
 def test():
     if request.method == 'GET':
-        return render_template('test.html', face_imgs=find_face_imgs())
+        return render_template('test.html')
 
 
 @app.route('/uploadFace', methods=('GET', 'POST'))
@@ -61,11 +61,19 @@ def delete_face(face_name):
     return redirect(url_for('index'))
 
 
-@app.route('/uploadVideo')
+@app.route('/uploadVideo', methods=('GET', 'POST'))
 def upload_video():
+    if request.method == 'POST':
         f = request.files.get('video')
         f.save("./static/video/" + secure_filename(f.filename))
-        return 'video uploaded successfully'
+        return redirect(url_for('test'))
+
+
+@app.route('/faceEmotinoRecognition')
+def faceEmotinoRecognition():
+    face_emotion_dict = video4.videoDetector(3, 'record0')
+    print(face_emotion_dict)
+    return render_template('result.html', face_emotion_dict=face_emotion_dict, face_imgs=find_face_imgs())
 
 
 if __name__ == "__main__":
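For reference, here is a minimal sketch of how the upload-and-analysis flow added in this commit could be exercised end to end with Flask's built-in test client. It is not part of the commit: the import path "app", the local file sample.mp4, and the uploaded name record0.mp4 are assumptions, chosen only to line up with the hard-coded video4.videoDetector(3, 'record0') call above.

import io

from app import app  # assumed import path for the Flask app shown in this diff

client = app.test_client()

# POST a video to the reworked /uploadVideo route; the handler saves it under
# ./static/video/ and now redirects back to the test page instead of returning a string.
with open("sample.mp4", "rb") as f:  # hypothetical local sample video
    resp = client.post(
        "/uploadVideo",
        data={"video": (io.BytesIO(f.read()), "record0.mp4")},
        content_type="multipart/form-data",
    )
print(resp.status_code)  # expect 302, the redirect to /goTest

# Hit the new analysis route; it runs video4.videoDetector(3, 'record0') and renders
# result.html with the returned face_emotion_dict.
resp = client.get("/faceEmotinoRecognition")
print(resp.status_code)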
(Four files in this part of the changeset are not displayed: two binary files with no preview, one diff that was too large to show, and one collapsed diff.)
@@ -23,13 +23,6 @@ import time
 # model = load_model(
 #     'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
 
-
-def get_key(val):
-    for key, value in labels_dict_.items():
-        if(value == val):
-            return key
-
-
 def convertMillis(millis):
     seconds = (millis/1000) % 60
     minutes = (millis/(1000*60)) % 60
@@ -51,7 +44,7 @@ def videoDetector(input_fps, video_name):
     detector = dlib.get_frontal_face_detector()
 
     # face & emotion detection time dict
-    descs = np.load('../static/img/descs.npy', allow_pickle=True)[()]
+    descs = np.load('static/img/descs.npy', allow_pickle=True)[()]
     labels_dict_ = {0: 'angry', 1: 'fear', 2: 'happy',
                     3: 'neutral', 4: 'sad', 5: 'surprise'}
     face_emotion_dict = {}
@@ -129,3 +122,5 @@ def videoDetector(input_fps, video_name):
     for i in range(1, 5):
         cv2.destroyAllWindows()
         cv2.waitKey(1)
+
+    return face_emotion_dict
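The substantive change in this file is that videoDetector now returns face_emotion_dict instead of only printing it, which is what lets the Flask route above pass the result to a template. Purely as an illustration (the name and timestamps below are invented), the returned value has this shape: one entry per face loaded from descs.npy, each holding a list of "hour:minute:second" strings per emotion.

# Illustrative only; not taken from the repository.
example_face_emotion_dict = {
    "person_a": {
        "angry": [], "fear": [],
        "happy": ["0:0:12.345", "0:1:3.021"],
        "neutral": ["0:0:24.69"],
        "sad": [], "surprise": [],
    },
}

for name, emotions in example_face_emotion_dict.items():
    for emotion, times in emotions.items():
        if times:  # print only the emotions that were actually observed
            print(name, emotion, times)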
@@ -9,114 +9,99 @@ import pathlib
 import time
 import pandas as pd
 import tensorflow as tf
-from tensorflow.keras.preprocessing.image import ImageDataGenerator,load_img
+from tensorflow.keras.preprocessing.image import ImageDataGenerator, load_img
 from tensorflow.keras.models import load_model
 from tensorflow.keras import regularizers
 from tensorflow import keras
 import time
 
 
-start = time.time()
-detector = dlib.get_frontal_face_detector()
-predictor = dlib.shape_predictor('./models/shape_predictor_68_face_landmarks.dat')
-facerec = dlib.face_recognition_model_v1('./models/dlib_face_recognition_resnet_model_v1.dat')
-model = load_model('../checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
-
-
-def get_key(val):
-    for key, value in labels_dict_.items():
-        if(value == val):
-            return key
-
-
 def convertMillis(millis):
-    seconds=(millis/1000)%60
-    minutes=(millis/(1000*60))%60
-    hours=(millis/(1000*60*60))%24
+    seconds = (millis/1000) % 60
+    minutes = (millis/(1000*60)) % 60
+    hours = (millis/(1000*60*60)) % 24
     return seconds, int(minutes), int(hours)
 
 
-def videoDetector(input_fps, video_name):
+def videoDetector(second, video_name):
+
+    # face & emotion detection model load
+    detector = dlib.get_frontal_face_detector()
+    predictor = dlib.shape_predictor(
+        'face_emotion_recognition/models/shape_predictor_68_face_landmarks.dat')
+    facerec = dlib.face_recognition_model_v1(
+        'face_emotion_recognition/models/dlib_face_recognition_resnet_model_v1.dat')
+    model = load_model(
+        'checkpoint/er-best-mobilenet1-bt32-model-classweight-adam.h5')
 
     # face & emotion detection time dict
-    descs = np.load('./img/descs.npy', allow_pickle=True)[()]
-    labels_dict_ = {0 : 'angry', 1 : 'fear' , 2: 'happy', 3: 'neutral', 4: 'sad', 5: 'surprise'}
+    descs = np.load('static/img/descs.npy', allow_pickle=True)[()]
+    labels_dict_ = {0: 'angry', 1: 'fear', 2: 'happy',
+                    3: 'neutral', 4: 'sad', 5: 'surprise'}
     face_emotion_dict = {}
     for name, saved_desc in descs.items():
-        face_emotion_dict[name] = {'angry': [], 'fear': [], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
-
+        face_emotion_dict[name] = {'angry': [], 'fear': [
+        ], 'happy': [], 'neutral': [], 'sad': [], 'surprise': []}
 
     # load the video info
-    video_path = './data/' + video_name + '.mp4'
-    cap=cv2.VideoCapture(video_path)
+    video_path = 'static/video/' + video_name + '.mp4'
+    cap = cv2.VideoCapture(video_path)
 
     # read the video's frame info
-    frameWidth = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-    frameHeight = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-    frame_size = (frameWidth, frameHeight)
-    fps = cap.get((cv2.CAP_PROP_FPS))
-    print(fps)
+    fps = cap.get(cv2.CAP_PROP_FPS)
+    multiplier = fps * second
 
+    frameCount = 0
+    ret = 1
 
-    _, img_bgr = cap.read()  # (800, 1920, 3)
-    padding_size = 0
-    resized_width = 1920
-    video_size = (resized_width, int(img_bgr.shape[0] * resized_width // img_bgr.shape[1]))
-    timestamps = [cap.get(cv2.CAP_PROP_POS_MSEC)]
-    prev_time = 0
-
-    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
-    while True:
-        retval, frameBGR = cap.read()  # read the video one frame at a time
-        current_time = time.time() - prev_time
+    while ret:
+        frameId = int(round(cap.get(1)))  # get the current frame number
+        ret, frameBGR = cap.read()  # read the video one frame at a time
 
         if(type(frameBGR) == type(None)):
             pass
         else:
-            frameBGR = cv2.resize(frameBGR, video_size)
             frame = cv2.cvtColor(frameBGR, cv2.COLOR_BGR2RGB)
 
-            if (retval is True) and (current_time > 1.5) :
-                prev_time = time.time()
+            if (ret is True) and (frameId % multiplier < 1):
                 faces = detector(frame, 1)
 
                 for (i, face) in enumerate(faces):
+                    try:
                         shape = predictor(frame, face)
-                    face_descriptor = facerec.compute_face_descriptor(frame, shape)
-
-                    img = cv2.resize(frame[face.top():face.bottom(), face.left():face.right()], dsize=(224, 224), interpolation = cv2.INTER_CUBIC)
-                    imgarr = np.array(img).reshape(1, 224, 224, 3) /255
-                    emotion = labels_dict_[model.predict(imgarr).argmax(axis=-1)[0]]
-
-                    last_found = {'name': 'unknown', 'dist': 0.6, 'color': (0,0,255)}
+                        face_descriptor = facerec.compute_face_descriptor(
+                            frame, shape)
+
+                        img = cv2.resize(frame[face.top():face.bottom(), face.left(
+                        ):face.right()], dsize=(224, 224), interpolation=cv2.INTER_CUBIC)
+                        imgarr = np.array(img).reshape(1, 224, 224, 3) / 255
+                        emotion = labels_dict_[
+                            model.predict(imgarr).argmax(axis=-1)[0]]
+
+                        last_found = {'name': 'unknown',
+                                      'dist': 0.6, 'color': (0, 0, 255)}
 
                         for name, saved_desc in descs.items():
-                        dist = np.linalg.norm([face_descriptor] - saved_desc, axis=1)
+                            dist = np.linalg.norm(
+                                [face_descriptor] - saved_desc, axis=1)
                             if dist < last_found['dist']:
-                            last_found = {'name': name, 'dist': dist, 'color': (255,255,255)}
-
-                    cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(face.right(), face.bottom()), color=last_found['color'], thickness=2)
-                    cv2.putText(frameBGR, last_found['name'] + ',' + emotion , org=(face.left(), face.top()), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
-
-                    con_sec, con_min, con_hour = convertMillis(cap.get(cv2.CAP_PROP_POS_MSEC))
-                    face_emotion_dict[last_found['name']][emotion].append("{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
-                    print("{0}:{1}:{2} {3}".format(con_hour, con_min, round(con_sec, 3), emotion))
-
-            cv2.imshow('frame', frameBGR)
-
-            key = cv2.waitKey(25)
-            if key == 27 :
-                break
-
+                                last_found = {
+                                    'name': name, 'dist': dist, 'color': (255, 255, 255)}
+
+                        cv2.rectangle(frameBGR, pt1=(face.left(), face.top()), pt2=(
+                            face.right(), face.bottom()), color=last_found['color'], thickness=2)
+                        cv2.putText(frameBGR, last_found['name'] + ',' + emotion, org=(face.left(), face.top(
+                        )), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=1, color=last_found['color'], thickness=2)
+
+                        con_sec, con_min, con_hour = convertMillis(
+                            cap.get(cv2.CAP_PROP_POS_MSEC))
+                        face_emotion_dict[last_found['name']][emotion].append(
+                            "{0}:{1}:{2}".format(con_hour, con_min, round(con_sec, 3)))
+                        print("{0}:{1}:{2} {3}".format(
+                            con_hour, con_min, round(con_sec, 3), emotion))
+                    except Exception as e:
+                        print(str(e))
+
+        frameCount += 1
     print(face_emotion_dict)
-    print("total time : ", time.time() - start)
-    if cap.isOpened():
-        cap.release()
-
-    for i in range(1,5):
-        cv2.destroyAllWindows()
-        cv2.waitKey(1)
-
-
-if __name__ == '__main__':
-    videoDetector(3, 'zoom_1')
+    return face_emotion_dict
\ No newline at end of file
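The refactor also replaces the old wall-clock throttle (current_time > 1.5) with frame-number sampling: multiplier = fps * second, and a frame is analyzed whenever frameId % multiplier < 1, i.e. roughly one analyzed frame per sampling interval of video regardless of how long inference takes. A quick sanity check of that predicate, with an assumed 30 fps clip and the same 3-second interval the Flask route passes in:

# Illustrative check of the sampling rule; the frame rate is an assumption.
fps = 30.0
second = 3
multiplier = fps * second  # 90.0

sampled = [frame_id for frame_id in range(1, 301) if frame_id % multiplier < 1]
print(sampled)  # [90, 180, 270]: one analyzed frame per 3 seconds at 30 fps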
code/web/templates/result.html
0 → 100644 (new file)
+<!doctype html>
+<html lang="en">
+
+<head>
+    <meta charset="UTF-8">
+    <meta name="viewport"
+        content="width=device-width, user-scalable=no, initial-scale=1.0, maximum-scale=1.0, minimum-scale=1.0">
+    <meta http-equiv="X-UA-Compatible" content="ie=edge">
+    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/css/bootstrap.min.css"
+        integrity="sha384-Gn5384xqQ1aoWXA+058RXPxPg6fy4IWvTNh0E263XmFcJlSAwiGgFAW/dAiS6JXm" crossorigin="anonymous">
+    <title>Flask Face Emotion Recognition App</title>
+</head>
+
+<body>
+
+    <div class="container" style="margin-top: 100px">
+        <h3>Face Emotion Recognition Platform</h3>
+        <hr>
+
+        <form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
+            <div class="form-group">
+                <label for="title" class="text-uppercase">Video Upload</label>
+                <input type="file" name="file">
+                <button type="submit" class="btn btn-outline-primary">Add</button>
+            </div>
+        </form>
+
+        <video autoplay width="320" height="240" controls>
+            <source src={{ url_for('static', filename="video/zoom_1.mp4") }} type="video/mp4">
+        </video>
+
+        <a href="/faceEmotinoRecognition" class="btn btn-outline-primary">Run face emotion analysis</a>
+
+
+        <table class="table">
+            <thead>
+                <tr>
+                    <th scope="col">name</th>
+                    <th scope="col">happy</th>
+                    <th scope="col">sad</th>
+                    <th scope="col">fear</th>
+                    <th scope="col">angry</th>
+                    <th scope="col">neutral</th>
+                    <th scope="col">surprise</th>
+                </tr>
+            </thead>
+            <tbody>
+
+                {% for face_img in face_imgs %}
+                <tr>
+                    <td scope="row">{{ face_img.name }}</td>
+                    {% if face_emotion_dict[face_img.name].happy %}
+                    <td>
+                        {% for time in face_emotion_dict[face_img.name].happy %}
+                        <span>{{time}}</span>
+                        {% endfor %}
+                    </td>
+                    {% else %}
+                    <td> X </td>
+                    {% endif %}
+
+                    {% if face_emotion_dict[face_img.name].sad %}
+                    <td>
+                        {% for time in face_emotion_dict[face_img.name].sad %}
+                        <span>{{time}}</span>
+                        {% endfor %}
+                    </td>
+                    {% else %}
+                    <td> X </td>
+                    {% endif %}
+
+                    {% if face_emotion_dict[face_img.name].fear %}
+                    <td>
+                        {% for time in face_emotion_dict[face_img.name].fear %}
+                        <span>{{time}}</span>
+                        {% endfor %}
+                    </td>
+                    {% else %}
+                    <td> X </td>
+                    {% endif %}
+
+                    {% if face_emotion_dict[face_img.name].angry %}
+                    <td>
+                        {% for time in face_emotion_dict[face_img.name].angry %}
+                        <span>{{time}}</span>
+                        {% endfor %}
+                    </td>
+                    {% else %}
+                    <td> X </td>
+                    {% endif %}
+
+                    {% if face_emotion_dict[face_img.name].neutral %}
+                    <td>
+                        {% for time in face_emotion_dict[face_img.name].neutral %}
+                        <span>{{time}}</span>
+                        {% endfor %}
+                    </td>
+                    {% else %}
+                    <td> X </td>
+                    {% endif %}
+
+                    {% if face_emotion_dict[face_img.name].surprise %}
+                    <td>
+                        {% for time in face_emotion_dict[face_img.name].surprise %}
+                        <span>{{time}}</span>
+                        {% endfor %}
+                    </td>
+                    {% else %}
+                    <td> X </td>
+                    {% endif %}
+
+                </tr>
+                {% endfor %}
+            </tbody>
+        </table>
+        <hr/>
+    </div>
+
+
+    <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js"
+        integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN"
+        crossorigin="anonymous"></script>
+    <script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.12.9/umd/popper.min.js"
+        integrity="sha384-ApNbgh9B+Y1QKtv3Rn7W3mgPxhU9K/ScQsAP7hUibX39j7fakFPskvXusvfa0b4Q"
+        crossorigin="anonymous"></script>
+    <script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0/js/bootstrap.min.js"
+        integrity="sha384-JZR6Spejh4U02d8jOt6vLEHfe/JQGiRRSQQxSfFWpi1MquVdAyjUar5+76PVCmYl"
+        crossorigin="anonymous"></script>
+</body>
+
+</html>
\ No newline at end of file
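result.html reads each emotion with attribute syntax such as face_emotion_dict[face_img.name].happy. Jinja2 resolves a dotted lookup by trying Python attribute access first and then falling back to item lookup, so this works on the plain dict returned by videoDetector. A small standalone check of that behaviour (the data is invented):

from jinja2 import Template

# Same lookup pattern as in result.html, reduced to a single table cell.
tmpl = Template(
    "{% if d[name].happy %}{{ d[name].happy | join(', ') }}{% else %}X{% endif %}"
)
print(tmpl.render(d={"person_a": {"happy": ["0:0:12.3"]}}, name="person_a"))  # 0:0:12.3
print(tmpl.render(d={"person_a": {"happy": []}}, name="person_a"))            # X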
@@ -17,13 +17,21 @@
         <h3>Face Emotion Recognition Platform</h3>
         <hr>
 
-        <form action="http://localhost:5000/uploadFace" method="POST" enctype="multipart/form-data">
+        <form action="http://localhost:5000/uploadVideo" method="POST" enctype="multipart/form-data">
             <div class="form-group">
                 <label for="title" class="text-uppercase">Video Upload</label>
-                <input type="file" name="file">
+                <input type="file" name="video">
                 <button type="submit" class="btn btn-outline-primary">Add</button>
             </div>
         </form>
+
+        <video autoplay width="320" height="240" controls>
+            <source src={{ url_for('static', filename="video/zoom_1.mp4") }} type="video/mp4">
+        </video>
+
+        <a href="/faceEmotinoRecognition" class="btn btn-outline-primary">Run face emotion analysis</a>
+
+        <hr/>
     </div>
 
 
(Two binary files changed; no preview available.)
New report documents added in this changeset (binary .docx files, 0 → 100644, no preview available). Paths are kept verbatim; 면담보고서 are advising-meeting reports and 진행보고서 are weekly progress reports:

report/면담 및 진행보고서/면담보고서/4월면담보고서.docx
report/면담 및 진행보고서/면담보고서/5월면담보고서.docx
report/면담 및 진행보고서/진행보고서/10주차진행보고서.docx
report/면담 및 진행보고서/진행보고서/11주차진행보고서.docx
report/면담 및 진행보고서/진행보고서/12주차진행보고서.docx
report/면담 및 진행보고서/진행보고서/13주차진행보고서.docx
report/면담 및 진행보고서/진행보고서/6주차진행보고서.docx
report/면담 및 진행보고서/진행보고서/7주차진행보고서.docx
report/면담 및 진행보고서/진행보고서/8주차진행보고서.docx
report/면담 및 진행보고서/진행보고서/9주차진행보고서.docx

Two additional binary files were also changed; their names are not shown in this view.