GetTopic.py
1.69 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
import os
import csv
import re
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
from konlpy.tag import Okt
from konlpy.tag import Komoran
from textrank import KeywordSummarizer
# Okt-based tokenizer; kept as an alternative to komoran_tokenizer below.
okt = Okt()


def Okt_tokenizer(sent):
    """Return the nouns of *sent* as extracted by the Okt analyzer."""
    return okt.nouns(sent)
komoran = Komoran()


def komoran_tokenizer(sent):
    """POS-tag *sent* with Komoran and keep only noun tokens.

    Tokens come back in "morph/TAG" form (join=True); only common nouns
    (NNG) and proper nouns (NNP) are retained.
    """
    tagged = komoran.pos(sent, join=True)
    return [token for token in tagged if '/NNG' in token or '/NNP' in token]
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# Matches anything that is neither a word character nor whitespace.
# Raw string avoids invalid-escape warnings; compiled once instead of
# per row/field as before.
_PUNCT_RE = re.compile(r'[^\w\s]')


def _clean_text(text):
    """Strip punctuation/symbols and newlines from *text*."""
    return _PUNCT_RE.sub('', text).replace('\n', '')


posts = []
# Each data.csv row is expected to be (title, content, ...); the two
# cleaned fields are concatenated into one document per row.
# NOTE(review): extra columns are ignored, as in the original code.
with open(os.path.join(BASE_DIR, 'data.csv'), 'r', encoding='utf-8') as db:
    reader = csv.reader(db)
    for data in reader:
        posts.append(_clean_text(data[0]) + _clean_text(data[1]))
# TextRank keyword extraction over the cleaned posts.
keyword_extractor = KeywordSummarizer(
    tokenize=komoran_tokenizer,  # noun-only tokens (NNG/NNP)
    window=-1,   # presumably "all within-sentence co-occurrences" — confirm against textrank docs
    verbose=False,
)
keywords = keyword_extractor.summarize(posts, topk=30)
print(keywords)