yangjisu

Text extraction and grammar check not yet finished

Showing 1 changed file with 60 additions and 5 deletions
@@ -3,7 +3,6 @@ var app = express();
const line = require('@line/bot-sdk');
const config = require('./config');
//papago api
var request = require('request');
@@ -11,9 +10,22 @@ var request = require('request');
var translate_api_url = 'https://openapi.naver.com/v1/papago/n2mt';
//language detection api_url
var languagedetect_api_url = 'https://openapi.naver.com/v1/papago/detectLangs';
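The detection endpoint is consumed further down in the handler (detect_body.langCode), but the request itself is outside this diff. A minimal sketch of how it is typically called with the request module already required above; the config key names for the Naver client id and secret are assumptions, not part of this commit:

var detect_options = {
    url: languagedetect_api_url,
    form: {'query': 'Hello, how are you?'},               // text whose language we want to detect
    headers: {
        'X-Naver-Client-Id': config.naver_client_id,      // assumed config key
        'X-Naver-Client-Secret': config.naver_client_secret  // assumed config key
    }
};
request.post(detect_options, function (error, response, body) {
    if (!error && response.statusCode == 200) {
        var detect_body = JSON.parse(body);
        console.log(detect_body.langCode);                // e.g. 'ko' or 'en'
    }
});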
//English grammar check
var EnGrammarCheck_api_url = 'https://api.textgears.com/check.php';

// textgears npm package sample (CommonJS require instead of ESM import, to match the rest of this file)
const textgears = require('textgears');
textgears({
    key: '9WUGcY6ZayYMphG7',
    text: 'I is an engineer',   // prompt() is browser-only; pass the text to check here
}).then(res => {
    for (const error of res.errors) {
        console.log('Bad: %s. Better: %s', error.bad, error.better.join(', '));
    }
});
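EnGrammarCheck_api_url above is declared but not used anywhere in this diff. A minimal sketch of calling the check.php endpoint directly with the request module, under the assumption that it accepts text and key parameters and returns the same errors array the snippet above iterates over:

var grammar_options = {
    url: EnGrammarCheck_api_url,
    qs: {
        text: 'I is an engineer',   // sample sentence to check
        key: '9WUGcY6ZayYMphG7'     // same key as the textgears snippet above
    },
    json: true
};
request.get(grammar_options, function (error, response, body) {
    if (!error && response.statusCode == 200 && body.errors) {
        body.errors.forEach(function (err) {
            console.log('Bad: %s. Better: %s', err.bad, err.better.join(', '));
        });
    }
});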
// create LINE SDK client
const client = new line.Client(config.line_config);
@@ -28,11 +40,54 @@ app.post('/webhook', line.middleware(config), (req, res) => {
.all(req.body.events.map(handleEvent))
.then((result) => res.json(result))
.catch((err) => {
console.error(err);
res.status(200).end();
});
});
// Imports the Google Cloud client library
const vision = require('@google-cloud/vision');

// Creates a client (named visionClient so it does not clash with the LINE client above)
const visionClient = new vision.ImageAnnotatorClient();

/**
 * TODO(developer): Uncomment the following line before running the sample.
 */
// const fileName = 'Local image file, e.g. /path/to/image.png';

// Read a local image as a text document (wrapped in an async function so await is legal)
async function detectDocumentText(fileName) {
    const [result] = await visionClient.documentTextDetection(fileName);
    const fullTextAnnotation = result.fullTextAnnotation;
    //console.log(`Full text: ${fullTextAnnotation.text}`);

    // Walk pages -> blocks -> paragraphs -> words (the block/paragraph levels were missing)
    fullTextAnnotation.pages.forEach(page => {
        page.blocks.forEach(block => {
            block.paragraphs.forEach(paragraph => {
                paragraph.words.forEach(word => {
                    const wordText = word.symbols.map(s => s.text).join('');
                    // console.log(`Word text: ${wordText}`);
                    word.symbols.forEach(symbol => {
                        // console.log(`Symbol text: ${symbol.text}`);
                    });
                });
            });
        });
    });
}
// The Vision library is already required and visionClient created above.

/**
 * TODO(developer): Uncomment the following line before running the sample.
 */
// const fileName = 'Local image file, e.g. /path/to/image.png';

// Performs text detection on the local file (wrapped in an async function so await is legal)
async function detectText(fileName) {
    const [result] = await visionClient.textDetection(fileName);
    const detections = result.textAnnotations;
    //console.log('Text:');
    detections.forEach(text => console.log(text));
}
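The commit message notes that text extraction and grammar check are not yet wired together. One possible way to connect the two pieces, sketched with a hypothetical helper name and flow that are not part of this commit:

// Hypothetical helper: run OCR on an image file, then grammar-check the extracted text
async function checkImageGrammar(fileName) {
    const [ocrResult] = await visionClient.textDetection(fileName);
    const detections = ocrResult.textAnnotations;
    const extractedText = detections.length ? detections[0].description : '';
    const res = await textgears({
        key: '9WUGcY6ZayYMphG7',   // same key as above
        text: extractedText
    });
    return res.errors;             // same errors array shape iterated in the snippet above
}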
// event handler
function handleEvent(event) {
if (event.type !== 'message' || event.message.type !== 'text') {
@@ -58,7 +113,7 @@ function handleEvent(event) {
//check that language detection worked correctly
console.log(detect_body.langCode);
//translation is only supported for Korean->English / English->Korean
if (detect_body.langCode == 'ko' || detect_body.langCode == 'en') {
......