import json
import time
import random
import os
import re
import pandas as pd

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.common.exceptions import NoSuchElementException
from bs4 import BeautifulSoup
from datetime import datetime, timedelta

BASE_DIR = os.path.dirname(os.path.realpath(__file__))

def sleeptime():
    # Random delay to mimic human browsing and avoid hammering the server.
    time.sleep(random.randint(1, 3))

def Click(xpath, driver):
    # Click via JavaScript so elements outside the viewport are still clickable.
    element = driver.find_element(By.XPATH, xpath)
    driver.execute_script("arguments[0].click();", element)
    sleeptime()

def GetData():
    login_info = {
        'userID' : '********',
        'userpw' : '********'
    }

    options = webdriver.ChromeOptions()
    options.add_argument('--headless')
    options.add_argument('--no-sandbox')
    options.add_argument('--window-size=1920,1080')
    options.add_argument('--disable-gpu')
    options.add_argument('--disable-dev-shm-usage')
    options.add_argument('--lang=ko_KR')
    options.add_argument('--user-agent=Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.183 Safari/537.36 Vivaldi/1.96.1147.47')

    # Expects chromedriver.exe next to this script.
    driver = webdriver.Chrome(service=Service(os.path.join(BASE_DIR, 'chromedriver.exe')), options=options)

    driver.get('about:blank')
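    # Override navigator.plugins so simple headless-browser checks, which look
    # for an empty plugin list, see a populated one.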
    driver.execute_script("Object.defineProperty(navigator, 'plugins', {get: function() {return[1, 2, 3, 4, 5];},});")
    sleeptime()
    driver.get('https://everytime.kr/login')

    sleeptime()
    driver.find_element(By.NAME, 'userid').send_keys(login_info['userID'])
    driver.find_element(By.NAME, 'password').send_keys(login_info['userpw'])
    driver.find_element(By.CLASS_NAME, 'submit').click()
    sleeptime()

    # International campus free board
    # Cutoff in the board's 'MM/DD HH:MM' display format (compared as strings below).
    yesterday = (datetime.today() - timedelta(1)).strftime('%m/%d %H:%M')
    print(yesterday)
    swt = True
    page = 0

    post_df = pd.DataFrame(columns=['title', 'content'])
    while swt:
        # The board's pagination links shift position after the first pages,
        # so the "next page" XPath index depends on how far we have paged.
        if page < 1:
            # First iteration: open the board from the side menu.
            Click('//*[@id="submenu"]/div/div[2]/ul/li[1]/a', driver)
            page += 1
        elif page == 1:
            Click('//*[@id="container"]/div[2]/div[2]/a', driver)
            page += 1
        elif page == 2:
            Click('//*[@id="container"]/div[2]/div[2]/a[2]', driver)
            page += 1
        else:
            Click('//*[@id="container"]/div[2]/div[2]/a[3]', driver)

        html = driver.page_source
        soup = BeautifulSoup(html, 'html.parser')

        TitleList = soup.select('#container > div.wrap.articles > article > a > h2')
        DateList = soup.select('#container > div.wrap.articles > article > a > time')

        for post in zip(TitleList, DateList):
            # Strip punctuation so the title can be embedded in an XPath string.
            title = re.sub(pattern=r'[^\w\s]', repl='', string=post[0].text)
            try:
                Click("//h2[contains(text(), '{}')]".format(title), driver)
            except NoSuchElementException:
                continue
            content = driver.find_element(By.XPATH, '//*[@id="container"]/div[2]/article/a/p').text
            driver.back()
            sleeptime()

            # Only keep posts whose title has not been collected yet.
            if not (post_df['title'] == title).any():
                content = re.sub(pattern=r'[^\w\s]', repl='', string=content)
                content = re.sub(pattern=r'\n', repl=' ', string=content)
                post_df = pd.concat([post_df,
                                     pd.DataFrame([[title, content]],
                                                  columns=['title', 'content'])],
                                    ignore_index=True)
            if post[1].text <= yesterday:
                # Stop paging once posts reach the cutoff; 'MM/DD HH:MM' strings
                # compare chronologically within a single year.
                swt = False
                break

    post_df.to_csv('data.csv', mode='w', encoding='utf-8-sig', index=False)
    print("CVS file saved")

    # Save all posts as one valid JSON array of "title content" strings.
    with open('data.json', 'w', encoding='utf-8-sig') as json_file:
        json.dump([title + ' ' + content
                   for title, content in zip(post_df['title'], post_df['content'])],
                  json_file, ensure_ascii=False)
    print("JSON file saved")

if __name__ == '__main__':
    GetData()
######## TODO: save the output as JSON in the form
########   { "document" : { "type" : "PLAIN_TEXT", "content" : "~~" }, "encodingType" : "UTF8" }
######## for use with the Google Sentiment Analyzer.
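
# A minimal sketch for the TODO above, assuming the request-body shape quoted in
# the comment (Google Cloud Natural Language analyzeSentiment). The helper name
# build_sentiment_payload and the output filename are illustrative, not part of
# this script's original design.
def build_sentiment_payload(text):
    # One request body per document, matching the TODO's format.
    return {
        "document": {"type": "PLAIN_TEXT", "content": text},
        "encodingType": "UTF8",
    }

# Possible usage once post_df exists, e.g. inside GetData():
#     with open('requests.jsonl', 'w', encoding='utf-8') as f:
#         for title, content in zip(post_df['title'], post_df['content']):
#             payload = build_sentiment_payload(title + ' ' + content)
#             f.write(json.dumps(payload, ensure_ascii=False) + '\n')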