조수연

Final version of the algorithm

1 +{
2 + "cells": [
3 + {
4 + "cell_type": "code",
5 + "execution_count": 72,
6 + "metadata": {},
7 + "outputs": [
8 + {
9 + "name": "stdout",
10 + "output_type": "stream",
11 + "text": [
12 + "Lab_b[skin] 29.32\n",
13 + "skin 의 warm 기준값과의 거리\n",
14 + "8.702000000000005\n",
15 + "skin 의 cool 기준값과의 거리\n",
16 + "12.32\n",
17 + "color_data\\warm_spring.png의 퍼스널 컬러는 웜톤(warm)입니다.\n"
18 + ]
19 + }
20 + ],
21 + "source": [
22 + "# 평균 색상 추출\n",
23 + "import cv2\n",
24 + "import numpy as np\n",
25 + "from sklearn.cluster import KMeans\n",
26 + "import matplotlib.pyplot as plt\n",
27 + "from mpl_toolkits.mplot3d import Axes3D\n",
28 + "from skimage import io\n",
29 + "from itertools import compress\n",
30 + "\n",
31 + "# tone 구별 \n",
32 + "from scipy.spatial import distance\n",
33 + "import copy\n",
34 + "import math\n",
35 + "import operator\n",
36 + "\n",
37 + "# main함수\n",
38 + "from colormath.color_objects import LabColor, sRGBColor, HSVColor\n",
39 + "from colormath.color_conversions import convert_color\n",
40 + "\n",
41 + "# color extract 클래스\n",
42 + "class DominantColors:\n",
43 + "\n",
44 + " CLUSTERS = None\n",
45 + " IMAGE = None\n",
46 + " COLORS = None\n",
47 + " LABELS = None\n",
48 + "\n",
49 + " def __init__(self, image, clusters=3):\n",
50 + " self.CLUSTERS = clusters\n",
51 + " img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
52 + " self.IMAGE = img.reshape((img.shape[0] * img.shape[1], 3))\n",
53 + "\n",
54 + " #using k-means to cluster pixels\n",
55 + " kmeans = KMeans(n_clusters = self.CLUSTERS)\n",
56 + " kmeans.fit(self.IMAGE)\n",
57 + "\n",
58 + " #the cluster centers are our dominant colors.\n",
59 + " self.COLORS = kmeans.cluster_centers_\n",
60 + " self.LABELS = kmeans.labels_\n",
61 + "\n",
62 + " def rgb_to_hex(self, rgb):\n",
63 + " return '#%02x%02x%02x' % (int(rgb[0]), int(rgb[1]), int(rgb[2]))\n",
64 + "\n",
65 + " # Return a list in order of color that appeared most often.\n",
66 + " def getHistogram(self):\n",
67 + " numLabels = np.arange(0, self.CLUSTERS+1)\n",
68 + " #create frequency count tables\n",
69 + " (hist, _) = np.histogram(self.LABELS, bins = numLabels)\n",
70 + " hist = hist.astype(\"float\")\n",
71 + " hist /= hist.sum()\n",
72 + "\n",
73 + " colors = self.COLORS\n",
74 + " #descending order sorting as per frequency count\n",
75 + " colors = colors[(-hist).argsort()]\n",
76 + " hist = hist[(-hist).argsort()]\n",
77 + " for i in range(self.CLUSTERS):\n",
78 + " colors[i] = colors[i].astype(int)\n",
79 + " # Blue mask 제거\n",
80 + " fil = [colors[i][2] < 250 and colors[i][0] > 10 for i in range(self.CLUSTERS)]\n",
81 + " colors = list(compress(colors, fil))\n",
82 + " return colors, hist\n",
83 + "\n",
84 + " def plotHistogram(self):\n",
85 + " colors, hist = self.getHistogram()\n",
86 + " #creating empty chart\n",
87 + " chart = np.zeros((50, 500, 3), np.uint8)\n",
88 + " start = 0\n",
89 + "\n",
90 + " #creating color rectangles\n",
91 + " for i in range(len(colors)):\n",
92 + " end = start + hist[i] * 500\n",
93 + " r,g,b = colors[i]\n",
94 + " #using cv2.rectangle to plot colors\n",
95 + " cv2.rectangle(chart, (int(start), 0), (int(end), 50), (r,g,b), -1)\n",
96 + " start = end\n",
97 + "\n",
98 + " #display chart\n",
99 + " plt.figure()\n",
100 + " plt.axis(\"off\")\n",
101 + " plt.imshow(chart)\n",
102 + " plt.show()\n",
103 + "\n",
104 + " return colors\n",
105 + " \n",
106 + " \n",
107 + " #tone analysis 함수\n",
108 + "def is_warm(lab_b, a):\n",
109 + " '''\n",
110 + " 파라미터 lab_b = [skin_b, hair_b, eye_b]\n",
111 + " a = 가중치 [skin, hair, eye]\n",
112 + " 질의색상 lab_b값에서 warm의 lab_b, cool의 lab_b값 간의 거리를\n",
113 + " 각각 계산하여 warm이 가까우면 1, 반대 경우 0 리턴\n",
114 + " '''\n",
115 + " # standard of skin, eyebrow, eye (눈썹, 눈동자는 0으로) \n",
116 + " warm_b_std = [38.022000000000006, 0, 0]\n",
117 + " cool_b_std = [17, 0, 0]\n",
118 + "\n",
119 + " warm_dist = 0\n",
120 + " cool_dist = 0\n",
121 + "\n",
122 + " body_part = ['skin', 'eyebrow', 'eye']\n",
123 + " for i in range(1):\n",
124 + " warm_dist += abs(lab_b[i] - warm_b_std[i]) * a[i]\n",
125 + " \n",
126 + " print(body_part[i],\"의 warm 기준값과의 거리\")\n",
127 + " print(abs(lab_b[i] - warm_b_std[i]))\n",
128 + " \n",
129 + " cool_dist += abs(lab_b[i] - cool_b_std[i]) * a[i]\n",
130 + " \n",
131 + " print(body_part[i],\"의 cool 기준값과의 거리\")\n",
132 + " print(abs(lab_b[i] - cool_b_std[i]))\n",
133 + " \n",
134 + " if(warm_dist <= cool_dist):\n",
135 + " return 1 #warm\n",
136 + " else:\n",
137 + " return 0 #cool\n",
138 + "\n",
139 + "# 이미지 자르는 함수\n",
140 + "def trimming (img): \n",
141 + " x = 100; \n",
142 + " y = 100; \n",
143 + " w = 100; \n",
144 + " h = 100; \n",
145 + " \n",
146 + " img_trim = img[y:y+h, x:x+w] \n",
147 + " return img_trim \n",
148 + "\n",
149 + "\n",
150 + "# 원래 main\n",
151 + "def analysis(imgpath):\n",
152 + " #######################################\n",
153 + " # Face detection #\n",
154 + " #######################################\n",
155 + " img=cv2.imread(imgpath)\n",
156 + " \n",
157 + " h,w,c=img.shape\n",
158 + " if((h>500) and (w>500)):\n",
159 + " img = trimming(img) # 이미지가 너무 크면 잘라서 확인\n",
160 + " \n",
161 + " face = [img, img,\n",
162 + " img, img,\n",
163 + " img, img]\n",
164 + "\n",
165 + " \n",
166 + " #######################################\n",
167 + " # Get Dominant Colors #\n",
168 + " #######################################\n",
169 + " temp = []\n",
170 + " clusters = 4\n",
171 + " for f in face:\n",
172 + " dc = DominantColors(f, clusters)\n",
173 + " face_part_color, _ = dc.getHistogram()\n",
174 + " #dc.plotHistogram()\n",
175 + " temp.append(np.array(face_part_color[0]))\n",
176 + " cheek1 = np.mean([temp[0], temp[1]], axis=0)\n",
177 + " cheek2 = np.mean([temp[2], temp[3]], axis=0)\n",
178 + " cheek3 = np.mean([temp[4], temp[5]], axis=0)\n",
179 + "\n",
180 + " Lab_b, hsv_s = [], []\n",
181 + " color = [cheek1, cheek2, cheek3]\n",
182 + " for i in range(3):\n",
183 + " rgb = sRGBColor(color[i][0], color[i][1], color[i][2], is_upscaled=True)\n",
184 + " lab = convert_color(rgb, LabColor, through_rgb_type=sRGBColor)\n",
185 + " hsv = convert_color(rgb, HSVColor, through_rgb_type=sRGBColor)\n",
186 + " Lab_b.append(float(format(lab.lab_b,\".2f\")))\n",
187 + " hsv_s.append(float(format(hsv.hsv_s,\".2f\"))*100)\n",
188 + "\n",
189 + " Lab_b[1]=0\n",
190 + " Lab_b[2]=0\n",
191 + " \n",
192 + " print('Lab_b[skin]',Lab_b[0])\n",
193 + "\n",
194 + " #######################################\n",
195 + " # Personal color Analysis #\n",
196 + " #######################################\n",
197 + " Lab_weight = [100, 0, 0]\n",
198 + " hsv_weight = [10, 0, 0]\n",
199 + " \n",
200 + " if(is_warm(Lab_b, Lab_weight)):\n",
201 + " tone = '웜톤(warm)'\n",
202 + " else:\n",
203 + " tone = '쿨톤(cool)'\n",
204 + " \n",
205 + " # Print Result\n",
206 + " print('{}의 퍼스널 컬러는 {}입니다.'.format(imgpath, tone))\n",
207 + "\n",
208 + " \n",
209 + "def main(): \n",
210 + " analysis('color_data\\warm_spring.png')\n",
211 + " \n",
212 + " \n",
213 + "if __name__ == '__main__':\n",
214 + " main()\n"
215 + ]
216 + },
217 + {
218 + "cell_type": "code",
219 + "execution_count": null,
220 + "metadata": {},
221 + "outputs": [],
222 + "source": []
223 + }
224 + ],
225 + "metadata": {
226 + "kernelspec": {
227 + "display_name": "Python 3",
228 + "language": "python",
229 + "name": "python3"
230 + },
231 + "language_info": {
232 + "codemirror_mode": {
233 + "name": "ipython",
234 + "version": 3
235 + },
236 + "file_extension": ".py",
237 + "mimetype": "text/x-python",
238 + "name": "python",
239 + "nbconvert_exporter": "python",
240 + "pygments_lexer": "ipython3",
241 + "version": "3.6.12"
242 + }
243 + },
244 + "nbformat": 4,
245 + "nbformat_minor": 4
246 +}
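
For reference, the classification in the notebook above reduces to a single comparison: convert the dominant skin color to Lab and check whether its b value is closer to the warm reference (38.022) or the cool reference (17). Below is a minimal standalone sketch of that step, assuming the colormath package is installed; the sample RGB value is hypothetical, and the reference values are the ones hard-coded in is_warm().

# Minimal sketch of the warm/cool decision used in the notebook above.
# Assumes the colormath package; the sample skin RGB value is hypothetical.
from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color

WARM_B_STD = 38.022  # warm reference for skin (Lab b)
COOL_B_STD = 17.0    # cool reference for skin (Lab b)

def classify_tone(skin_rgb):
    # skin_rgb: (R, G, B) with components in 0-255
    rgb = sRGBColor(*skin_rgb, is_upscaled=True)
    lab = convert_color(rgb, LabColor)
    b = lab.lab_b
    return 'warm' if abs(b - WARM_B_STD) <= abs(b - COOL_B_STD) else 'cool'

print(classify_tone((231, 180, 146)))  # hypothetical skin-tone sample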
@@ -15,8 +15,9 @@ class DetectFace:
 
         #face detection part
         self.img = cv2.imread(image)
-        #if self.img.shape[0]>500:
-        #    self.img = cv2.resize(self.img, dsize=(0,0), fx=0.8, fy=0.8)
+
+        if self.img.shape[0]>500:
+            self.img = cv2.resize(self.img, dsize=(0,0), fx=0.8, fy=0.8)
 
         # init face parts
         self.right_eyebrow = []
@@ -32,7 +33,7 @@ class DetectFace:
 
     # return type : np.array
     def detect_face_part(self):
-        face_parts = [[],[],[],[],[],[],[]]
+        face_parts = [[],[],[],[],[],[], []]
         # detect faces in the grayscale image
         rect = self.detector(cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY), 1)[0]
 
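
The hunks above enable the previously commented-out downscaling in DetectFace.__init__: any image taller than 500 px is shrunk by 20% on both axes before face detection runs. A minimal sketch of that resize guard in isolation, assuming only OpenCV; 'face.jpg' is a hypothetical path:

# Minimal sketch of the resize guard enabled in the diff above (OpenCV only).
import cv2

img = cv2.imread('face.jpg')  # hypothetical input path
if img.shape[0] > 500:  # height check, as in DetectFace.__init__
    img = cv2.resize(img, dsize=(0, 0), fx=0.8, fy=0.8)  # scale both axes by 0.8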