조수연

Algorithm (final version)

{
"cells": [
{
"cell_type": "code",
"execution_count": 72,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Lab_b[skin] 29.32\n",
"skin 의 warm 기준값과의 거리\n",
"8.702000000000005\n",
"skin 의 cool 기준값과의 거리\n",
"12.32\n",
"color_data\\warm_spring.png의 퍼스널 컬러는 웜톤(warm)입니다.\n"
]
}
],
"source": [
"# 평균 색상 추출\n",
"import cv2\n",
"import numpy as np\n",
"from sklearn.cluster import KMeans\n",
"import matplotlib.pyplot as plt\n",
"from mpl_toolkits.mplot3d import Axes3D\n",
"from skimage import io\n",
"from itertools import compress\n",
"\n",
"# tone 구별 \n",
"from scipy.spatial import distance\n",
"import copy\n",
"import math\n",
"import operator\n",
"\n",
"# main함수\n",
"from colormath.color_objects import LabColor, sRGBColor, HSVColor\n",
"from colormath.color_conversions import convert_color\n",
"\n",
"# color extract 클래스\n",
"class DominantColors:\n",
"\n",
" CLUSTERS = None\n",
" IMAGE = None\n",
" COLORS = None\n",
" LABELS = None\n",
"\n",
" def __init__(self, image, clusters=3):\n",
" self.CLUSTERS = clusters\n",
" img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
" self.IMAGE = img.reshape((img.shape[0] * img.shape[1], 3))\n",
"\n",
" #using k-means to cluster pixels\n",
" kmeans = KMeans(n_clusters = self.CLUSTERS)\n",
" kmeans.fit(self.IMAGE)\n",
"\n",
" #the cluster centers are our dominant colors.\n",
" self.COLORS = kmeans.cluster_centers_\n",
" self.LABELS = kmeans.labels_\n",
"\n",
" def rgb_to_hex(self, rgb):\n",
" return '#%02x%02x%02x' % (int(rgb[0]), int(rgb[1]), int(rgb[2]))\n",
"\n",
" # Return a list in order of color that appeared most often.\n",
" def getHistogram(self):\n",
" numLabels = np.arange(0, self.CLUSTERS+1)\n",
" #create frequency count tables\n",
" (hist, _) = np.histogram(self.LABELS, bins = numLabels)\n",
" hist = hist.astype(\"float\")\n",
" hist /= hist.sum()\n",
"\n",
" colors = self.COLORS\n",
" #descending order sorting as per frequency count\n",
" colors = colors[(-hist).argsort()]\n",
" hist = hist[(-hist).argsort()]\n",
" for i in range(self.CLUSTERS):\n",
" colors[i] = colors[i].astype(int)\n",
" # Blue mask 제거\n",
" fil = [colors[i][2] < 250 and colors[i][0] > 10 for i in range(self.CLUSTERS)]\n",
" colors = list(compress(colors, fil))\n",
" return colors, hist\n",
"\n",
" def plotHistogram(self):\n",
" colors, hist = self.getHistogram()\n",
" #creating empty chart\n",
" chart = np.zeros((50, 500, 3), np.uint8)\n",
" start = 0\n",
"\n",
" #creating color rectangles\n",
" for i in range(len(colors)):\n",
" end = start + hist[i] * 500\n",
" r,g,b = colors[i]\n",
" #using cv2.rectangle to plot colors\n",
" cv2.rectangle(chart, (int(start), 0), (int(end), 50), (r,g,b), -1)\n",
" start = end\n",
"\n",
" #display chart\n",
" plt.figure()\n",
" plt.axis(\"off\")\n",
" plt.imshow(chart)\n",
" plt.show()\n",
"\n",
" return colors\n",
" \n",
" \n",
" #tone analysis 함수\n",
"def is_warm(lab_b, a):\n",
" '''\n",
" 파라미터 lab_b = [skin_b, hair_b, eye_b]\n",
" a = 가중치 [skin, hair, eye]\n",
" 질의색상 lab_b값에서 warm의 lab_b, cool의 lab_b값 간의 거리를\n",
" 각각 계산하여 warm이 가까우면 1, 반대 경우 0 리턴\n",
" '''\n",
" # standard of skin, eyebrow, eye (눈썹, 눈동자는 0으로) \n",
" warm_b_std = [38.022000000000006, 0, 0]\n",
" cool_b_std = [17, 0, 0]\n",
"\n",
" warm_dist = 0\n",
" cool_dist = 0\n",
"\n",
" body_part = ['skin', 'eyebrow', 'eye']\n",
" for i in range(1):\n",
" warm_dist += abs(lab_b[i] - warm_b_std[i]) * a[i]\n",
" \n",
" print(body_part[i],\"의 warm 기준값과의 거리\")\n",
" print(abs(lab_b[i] - warm_b_std[i]))\n",
" \n",
" cool_dist += abs(lab_b[i] - cool_b_std[i]) * a[i]\n",
" \n",
" print(body_part[i],\"의 cool 기준값과의 거리\")\n",
" print(abs(lab_b[i] - cool_b_std[i]))\n",
" \n",
" if(warm_dist <= cool_dist):\n",
" return 1 #warm\n",
" else:\n",
" return 0 #cool\n",
"\n",
"# 이미지 자르는 함수\n",
"def trimming (img): \n",
" x = 100; \n",
" y = 100; \n",
" w = 100; \n",
" h = 100; \n",
" \n",
" img_trim = img[y:y+h, x:x+w] \n",
" return img_trim \n",
"\n",
"\n",
"# 원래 main\n",
"def analysis(imgpath):\n",
" #######################################\n",
" # Face detection #\n",
" #######################################\n",
" img=cv2.imread(imgpath)\n",
" \n",
" h,w,c=img.shape\n",
" if((h>500) and (w>500)):\n",
" img = trimming(img) # 이미지가 너무 크면 잘라서 확인\n",
" \n",
" face = [img, img,\n",
" img, img,\n",
" img, img]\n",
"\n",
" \n",
" #######################################\n",
" # Get Dominant Colors #\n",
" #######################################\n",
" temp = []\n",
" clusters = 4\n",
" for f in face:\n",
" dc = DominantColors(f, clusters)\n",
" face_part_color, _ = dc.getHistogram()\n",
" #dc.plotHistogram()\n",
" temp.append(np.array(face_part_color[0]))\n",
" cheek1 = np.mean([temp[0], temp[1]], axis=0)\n",
" cheek2 = np.mean([temp[2], temp[3]], axis=0)\n",
" cheek3 = np.mean([temp[4], temp[5]], axis=0)\n",
"\n",
" Lab_b, hsv_s = [], []\n",
" color = [cheek1, cheek2, cheek3]\n",
" for i in range(3):\n",
" rgb = sRGBColor(color[i][0], color[i][1], color[i][2], is_upscaled=True)\n",
" lab = convert_color(rgb, LabColor, through_rgb_type=sRGBColor)\n",
" hsv = convert_color(rgb, HSVColor, through_rgb_type=sRGBColor)\n",
" Lab_b.append(float(format(lab.lab_b,\".2f\")))\n",
" hsv_s.append(float(format(hsv.hsv_s,\".2f\"))*100)\n",
"\n",
" Lab_b[1]=0\n",
" Lab_b[2]=0\n",
" \n",
" print('Lab_b[skin]',Lab_b[0])\n",
"\n",
" #######################################\n",
" # Personal color Analysis #\n",
" #######################################\n",
" Lab_weight = [100, 0, 0]\n",
" hsv_weight = [10, 0, 0]\n",
" \n",
" if(is_warm(Lab_b, Lab_weight)):\n",
" tone = '웜톤(warm)'\n",
" else:\n",
" tone = '쿨톤(cool)'\n",
" \n",
" # Print Result\n",
" print('{}의 퍼스널 컬러는 {}입니다.'.format(imgpath, tone))\n",
"\n",
" \n",
"def main(): \n",
" analysis('color_data\\warm_spring.png')\n",
" \n",
" \n",
"if __name__ == '__main__':\n",
" main()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.12"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
@@ -15,8 +15,9 @@ class DetectFace:
         #face detection part
         self.img = cv2.imread(image)
-        #if self.img.shape[0]>500:
-        #    self.img = cv2.resize(self.img, dsize=(0,0), fx=0.8, fy=0.8)
+        if self.img.shape[0]>500:
+            self.img = cv2.resize(self.img, dsize=(0,0), fx=0.8, fy=0.8)
         # init face parts
         self.right_eyebrow = []
@@ -32,7 +33,7 @@
     # return type : np.array
     def detect_face_part(self):
-        face_parts = [[],[],[],[],[],[],[]]
+        face_parts = [[],[],[],[],[],[], []]
         # detect faces in the grayscale image
         rect = self.detector(cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY), 1)[0]
......