천현우

Changed the script to work as a server: audio posted to /upload_sound is transcribed with Google Cloud Speech-to-Text instead of being streamed from the local microphone.

// Imports the Google Cloud Speech-to-Text client library
const speech = require('@google-cloud/speech');
const express = require('express');
const multer = require('multer');

// Creates a shared Speech client plus the Express app and upload handler
const client = new speech.SpeechClient();
const upload = multer(); // no options: uploaded files are kept in memory
const app = express();
const port = 5501;
app.use(express.static('./')); // serve the front-end files from this folder

// Transcribes an audio buffer with Cloud Speech-to-Text
// (despite the "TextToSpeech" name, this performs speech-to-text)
async function testGoogleTextToSpeech(audioBuffer) {
  const audio = {
    content: audioBuffer.toString('base64'),
  };
  const encoding = 'LINEAR16';
  const sampleRateHertz = 48000;
  const languageCode = 'ko-KR';
  const speechContexts = [{
    phrases: [
      // Korean phrase hints built from class tokens, e.g.
      // "$OOV_CLASS_ALPHA_SEQUENCE 는 $OOV_CLASS_DIGIT_SEQUENCE 보다 크다"
      // reads "<letters> is greater than <digits>"
      '$OOV_CLASS_ALPHA_SEQUENCE 는 $OOV_CLASS_ALPHA_SEQUENCE',
      '$OOV_CLASS_ALPHA_SEQUENCE 는 $OOV_CLASS_DIGIT_SEQUENCE',
      '$OOV_CLASS_ALPHA_SEQUENCE 는 $OOV_CLASS_ALPHA_SEQUENCE 보다 크다',
      '$OOV_CLASS_ALPHA_SEQUENCE 는 $OOV_CLASS_ALPHA_SEQUENCE 보다 작다',
      '$OOV_CLASS_ALPHA_SEQUENCE 는 $OOV_CLASS_DIGIT_SEQUENCE 보다 크다',
      '$OOV_CLASS_ALPHA_SEQUENCE 는 $OOV_CLASS_DIGIT_SEQUENCE 보다 작다',
      'for $OOV_CLASS_ALPHA_SEQUENCE in range $OOV_CLASS_DIGIT_SEQUENCE',
      '$OOV_CLASS_ALPHANUMERIC_SEQUENCE 는 $OOV_CLASS_ALPHANUMERIC_SEQUENCE',
      '$OOV_CLASS_ALPHANUMERIC_SEQUENCE 는 $OOV_CLASS_ALPHANUMERIC_SEQUENCE 보다 크다',
      '$OOV_CLASS_ALPHANUMERIC_SEQUENCE 는 $OOV_CLASS_ALPHANUMERIC_SEQUENCE 보다 작다',
      'for $OOV_CLASS_ALPHA_SEQUENCE in range $OOV_CLASS_ALPHANUMERIC_SEQUENCE',
      'if',
      '이프',
      'else if',
      // ... (phrases elided in the original diff)
      '조건문 선언'
    ],
    boost: 20
  }];
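
  // The speechContexts above use Speech-to-Text speech adaptation:
  // $OOV_CLASS_ALPHA_SEQUENCE, $OOV_CLASS_DIGIT_SEQUENCE and
  // $OOV_CLASS_ALPHANUMERIC_SEQUENCE are class tokens matching spoken letter,
  // digit, and alphanumeric sequences, and boost: 20 raises the likelihood of
  // these phrase shapes in the transcript, so spoken Korean pseudo-code such
  // as "A 는 B 보다 크다" is recognized more reliably.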
  const request = {
    audio: audio,
    config: {
      encoding: encoding,
      sampleRateHertz: sampleRateHertz,
      // assumed from the constant above; this line fell in the diff's elided span
      languageCode: languageCode,
      speechContexts: speechContexts
    },
  };
  // Non-streaming recognition on the full uploaded clip
  const [response] = await client.recognize(request);
  const transcription = response.results
    .map(result => result.alternatives[0].transcript)
    .join('\n');
  return transcription;
}
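
// client.recognize() is synchronous recognition, limited to roughly one
// minute of audio; longer recordings would need longRunningRecognize().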

app.post('/upload_sound', upload.any(), async (req, res) => {
  console.log("Getting text transcription..");
  // upload.any() collects every uploaded file on req.files; the recording
  // arrives as an in-memory buffer because multer has no storage configured
  const transcription = await testGoogleTextToSpeech(req.files[0].buffer);
  console.log("Text transcription: " + transcription);
  res.status(200).send(transcription);
});

app.listen(port, () => {
  console.log(`Express server listening on port: ${port}...`);
});
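
A minimal way to exercise the endpoint, sketched as a hypothetical client (assumes Node 18+ globals fetch, FormData and Blob; the field name 'sound' and the file sample.wav are made-up examples, and upload.any() accepts any field name):

const fs = require('fs');

async function transcribeFile(path) {
  const form = new FormData();
  // Wrap the raw LINEAR16 WAV bytes in a Blob so FormData sends a file part
  form.append('sound', new Blob([fs.readFileSync(path)]), 'sample.wav');
  const res = await fetch('http://localhost:5501/upload_sound', {
    method: 'POST',
    body: form,
  });
  console.log(await res.text()); // the transcription returned by the server
}

transcribeFile('./sample.wav');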