// Author: 박선진 (Park Seon-jin)
//
// Split the uploaded video into frames and send each frame to the detection API.

const express = require('express');
const fs = require('fs');
const {
cv,
getDataFilePath,
drawBlueRect,
drawGreenRect
} = require('./utils');
const openCV = require('opencv4nodejs');
const ffmpeg = require('ffmpeg');
const request = require('request')
const router = express.Router();
const path = require('path');
//================================================================
/**
 * POST /videoResult
 * Accepts a base64 data-URL video in req.body[0].preview, saves it to
 * ./data/temp.mp4, extracts one frame per second with ffmpeg, and POSTs
 * each frame (base64-encoded) to the external detection service.
 *
 * NOTE(review): frame extraction and the detection requests are
 * asynchronous, so the JSON response below is sent before detection
 * finishes — returning the detected image path in the same response
 * requires awaiting the detection results (tracked in the TODO below).
 */
router.post('/videoResult', function (req, res) {
  try {
    // Strip the data-URL prefix and restore '+' characters that were
    // decoded as spaces during transport.
    // BUG FIX: `preview` was declared twice with `let`, which is a
    // SyntaxError and prevented this module from loading at all.
    const preview = req.body[0].preview;
    let str = preview.replace(/^data:(.*?);base64,/, "");
    str = str.replace(/ /g, '+');

    // BUG FIX: writeFileSync is synchronous — it takes no callback and
    // throws on error, so the previous callback argument was ignored.
    fs.writeFileSync(`./data/temp.mp4`, str, 'base64');
    console.log("video saved");

    // BUG FIX: was an implicit global; scope it to this request.
    let detectedImgFile = null;

    const process = new ffmpeg(`./data/temp.mp4`);
    process.then(function (video) {
      video.fnExtractFrameToJPG(__dirname + "/data",
        {
          every_n_seconds: 1,
          file_name: 'frame_%s'
        }, function (error, files) {
          if (error) {
            console.error('frame extraction failed:', error);
            return;
          }
          console.log('###1 Frames =>' + files);
          console.log("###2 갯수 => " + files.length);
          console.log("###3 첫번째 파일 => " + files[0]); // 마지막 파일은 영상임
          // The last entry returned by fnExtractFrameToJPG is the source
          // video itself, so iterate only over files.length - 1 frames.
          for (let i = 0; i < files.length - 1; i++) {
            request.post({
              url: 'http://101.101.210.73/process',
              form: {
                // BUG FIX: every request sent files[0]; send frame i.
                'data': base64_encode(files[i])
              },
              json: true
            }, (err, response, body) => {
              if (err) {
                console.error('detection request failed:', err);
                return;
              }
              console.log(body);
            });
          }
          /**
           * TODO
           * 반복문돌면서 각 프레임 파이썬 api 요청하고 응답받아서
           * 하나라도 true나오면 프론트에 감지된 이미지파일주소 응답
           */
          detectedImgFile = null;
        }
      );
    }, function (err) {
      console.error('ffmpeg failed:', err);
    });
    // NOTE(review): removed a second temp.mp4 write containing a
    // `while (!done)` loop that never set `done` (infinite loop) and
    // called cv.imwrite without an image argument.
  } catch (err) {
    console.log("err : " + err);
    console.error(err);
  }
  // BUG FIX: a second `return res.json(...)` was unreachable; the
  // detected file path cannot be returned here yet (see NOTE above).
  return res.json({ data: 'myData' });
});
//================================================================
// router.post('/faceRecognition', function (req, res) {
// try {
// let preview = req.body[0].preview;
// str = preview.replace(/^data:(.*?);base64,/, "");
// str = str.replace(/ /g, '+');
// // 임시파일 저장
// fs.writeFile(`./data/temp.jpg`, str, 'base64', function (err) {
// if (err) throw err;
// console.log("saved");
// detectFaceAndEyes('./data/temp.jpg');
// });
// } catch (err) {
// console.log('err: ' + err);
// }
// return res.json({ data: 'myData' });
// });
//================================================================
/**
 * Encode a UTF-8 string as base64.
 * @param {string} plaintext - text to encode
 * @returns {string} base64-encoded representation of the input
 */
function base64encode(plaintext) {
  const buf = Buffer.from(plaintext, "utf8");
  return buf.toString('base64');
}
/**
 * Decode a base64 string to UTF-8 text.
 * @param {string} base64text - base64-encoded input
 * @returns {string} decoded UTF-8 string
 */
function base64decode(base64text) {
  console.log(base64text.length);
  return Buffer.from(base64text, 'base64').toString('utf8');
}
// BUG FIX: the closing brace above was missing, leaving base64decode
// unclosed and nesting base64_encode after its `return` — so callers
// (the /videoResult route) got a ReferenceError for base64_encode.

/**
 * Read a file from disk and return its contents base64-encoded.
 * @param {string} file - path of the file to read
 * @returns {string} base64-encoded file contents
 */
function base64_encode(file) {
  // read binary data
  const bitmap = fs.readFileSync(file);
  // Buffer.from replaces the deprecated `new Buffer(...)` constructor.
  return Buffer.from(bitmap).toString('base64');
}
// function detectFaceAndEyes(filePath) {
// const image = cv.imread(filePath);
// const faceClassifier = new cv.CascadeClassifier(cv.HAAR_FRONTALFACE_DEFAULT);
// const eyeClassifier = new cv.CascadeClassifier(cv.HAAR_EYE);
// // detect faces
// const faceResult = faceClassifier.detectMultiScale(image.bgrToGray());
// if (!faceResult.objects.length) {
// throw new Error('No faces detected!');
// }
// const sortByNumDetections = result => result.numDetections
// .map((num, idx) => ({ num, idx }))
// .sort(((n0, n1) => n1.num - n0.num))
// .map(({ idx }) => idx);
// // get best result
// const faceRect = faceResult.objects[sortByNumDetections(faceResult)[0]];
// console.log('faceRects:', faceResult.objects);
// console.log('confidences:', faceResult.numDetections);
// // detect eyes
// const faceRegion = image.getRegion(faceRect);
// const eyeResult = eyeClassifier.detectMultiScale(faceRegion);
// console.log('eyeRects:', eyeResult.objects);
// console.log('confidences:', eyeResult.numDetections);
// // get best result
// const eyeRects = sortByNumDetections(eyeResult)
// .slice(0, 2)
// .map(idx => eyeResult.objects[idx]);
// // draw face detection
// drawBlueRect(image, faceRect);
// // draw eyes detection in face region
// eyeRects.forEach(eyeRect => drawGreenRect(faceRegion, eyeRect));
// cv.imwrite(`./data/temp2.jpg`, image);
// }
// Expose the router so the server entry point can mount it (under /api).
module.exports = router;
\ No newline at end of file
......
This diff is collapsed. Click to expand it.
{
"name": "back-end",
"version": "1.0.0",
"description": "",
"main": "apiRouter.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1",
"start": "node server.js"
},
"author": "",
"license": "ISC",
"dependencies": {
"body-parser": "^1.19.0",
"cors": "^2.8.5",
"express": "^4.17.1",
"ffmpeg": "0.0.4",
"opencv4nodejs": "^5.6.0",
"path": "^0.12.7",
"request": "^2.88.2"
}
}
......@@ -12,5 +12,5 @@ app.use(bodyParser.urlencoded({limit: '100mb', extended: true}));
app.use(bodyParser());
app.use('/api', api);
// BUG FIX: `port` was declared twice with `const` (3002 then 3003),
// which is a SyntaxError; keep only the current port.
const port = 3003;
app.listen(port, () => console.log(`노드서버 시작 : ${port}`));
......
const path = require('path');
const cv = require('opencv4nodejs');
// Re-export the OpenCV binding so callers can use require('./utils').cv.
exports.cv = cv;

// Absolute path of the shared ./data directory next to this module.
const dataPath = path.resolve(__dirname, './data');
exports.dataPath = dataPath;

/**
 * Resolve a file name against the shared data directory.
 * @param {string} fileName - bare file name (or relative path)
 * @returns {string} absolute path under ./data
 */
exports.getDataFilePath = (fileName) => path.resolve(dataPath, fileName);
/**
 * Continuously read frames from a video file and hand each one to
 * `onFrame`, rewinding to the start whenever the end of the stream is
 * reached. Stops when a key press is detected via cv.waitKey.
 * @param {string} videoFile - path of the video to read
 * @param {number} delay - milliseconds passed to cv.waitKey per frame
 * @param {(frame: any) => void} onFrame - callback invoked with every frame
 */
const grabFrames = (videoFile, delay, onFrame) => {
  const capture = new cv.VideoCapture(videoFile);
  const timer = setInterval(() => {
    let frame = capture.read();
    // End of stream: rewind and read the first frame again.
    if (frame.empty) {
      capture.reset();
      frame = capture.read();
    }
    onFrame(frame);
    // waitKey yields -1 (or 255 on some platforms) when no key was pressed.
    const pressed = cv.waitKey(delay);
    if (pressed !== -1 && pressed !== 255) {
      clearInterval(timer);
      console.log('Key pressed, exiting.');
    }
  }, 0);
};
exports.grabFrames = grabFrames;
exports.runVideoDetection = (src, detect) => {
grabFrames(src, 1, frame => {
detect(frame);
});
};
exports.drawRectAroundBlobs = (binaryImg, dstImg, minPxSize, fixedRectWidth) => {
const {
centroids,
stats
} = binaryImg.connectedComponentsWithStats();
// pretend label 0 is background
for (let label = 1; label < centroids.rows; label += 1) {
const [x1, y1] = [stats.at(label, cv.CC_STAT_LEFT), stats.at(label, cv.CC_STAT_TOP)];
const [x2, y2] = [
x1 + (fixedRectWidth || stats.at(label, cv.CC_STAT_WIDTH)),
y1 + (fixedRectWidth || stats.at(label, cv.CC_STAT_HEIGHT))
];
const size = stats.at(label, cv.CC_STAT_AREA);
const blue = new cv.Vec(255, 0, 0);
if (minPxSize < size) {
dstImg.drawRectangle(
new cv.Point(x1, y1),
new cv.Point(x2, y2),
{ color: blue, thickness: 2 }
);
}
}
};
/**
 * Draw a rectangle onto an image in the given color.
 * @param {any} image - target image
 * @param {any} rect - cv.Rect describing the rectangle
 * @param {any} color - cv.Vec BGR color
 * @param {{thickness?: number}} [opts] - drawing options
 */
const drawRect = (image, rect, color, opts = { thickness: 2 }) => {
  return image.drawRectangle(rect, color, opts.thickness, cv.LINE_8);
};
exports.drawRect = drawRect;
// Convenience wrappers that bind drawRect to a fixed BGR color.
const makeColorRect = (b, g, r) =>
  (image, rect, opts = { thickness: 2 }) =>
    drawRect(image, rect, new cv.Vec(b, g, r), opts);

exports.drawBlueRect = makeColorRect(255, 0, 0);
exports.drawGreenRect = makeColorRect(0, 255, 0);
exports.drawRedRect = makeColorRect(0, 0, 255);
\ No newline at end of file