fabriciorsf opened this issue 3 years ago
Hi! This error occurs when you try to add an image path in a web application. Can you share the block of code that is giving the error?
The images are at the path ./images/.
Here is my code, edited from the webcam_demo project:
./src/App.js
import { useRef } from 'react';
import { CompreFace } from 'compreface-javascript-sdk';
import './App.css';

function App() {
  const videoTag = useRef(null);
  const canvas1 = useRef(null);
  const canvas2 = useRef(null);
  const canvas3 = useRef(null);

  const server = "http://10.0.0.11"; // my server is running on another machine in my network
  const port = 8000;
  const detection_key = "my_detection_key";
  const recognition_key = "my_recognition_key";
  const verification_key = "my_verification_key";

  const drawFace = (canvasElement, faceData, extraCanvas) => {
    const evt = new Event("next_frame", { "bubbles": true, "cancelable": false });
    document.dispatchEvent(evt);
    let box = faceData.result[0].box;
    canvasElement.clearRect(0, 0, 640, 480);
    extraCanvas.clearRect(0, 0, 640, 480);
    canvasElement.strokeStyle = 'green';
    extraCanvas.strokeStyle = "blue";
    extraCanvas.fillStyle = "white";
    extraCanvas.lineWidth = 5;
    canvasElement.lineWidth = 5;
    canvasElement.strokeRect(box.x_min, box.y_min, box.x_max - box.x_min, box.y_max - box.y_min);
    extraCanvas.fillText(
      Number.parseFloat(box.probability).toPrecision(5) + ' ' + faceData.result[0].gender + ' '
        + faceData.result[0].age[0] + '-' + faceData.result[0].age[1],
      box.x_min, box.y_min - 10
    );
  }

  const handleVideoStart = () => {
    navigator.mediaDevices.getUserMedia({ video: true })
      .then(stream => videoTag.current.srcObject = stream)
      .catch(error => {
        console.log(error);
      })

    videoTag.current.addEventListener('play', () => {
      // Detection Service init of CompreFace
      let detection_service = core.initFaceDetectionService(detection_key);
      let ctx1 = canvas1.current.getContext('2d');
      let ctx2 = canvas2.current.getContext('2d');
      let ctx3 = canvas3.current.getContext("2d");

      document.addEventListener('next_frame', () => {
        ctx1.drawImage(videoTag.current, 0, 0, 640, 480)
        canvas1.current.toBlob(blob => {
          detection_service.detect(blob, { limit: 1, face_plugins: 'age,gender' })
            .then(res => {
              drawFace(ctx2, res, ctx3)
            })
            .catch(error => {
              console.log(error);
            })
        }, 'image/jpeg', 0.95)
      })

      const evt = new Event("next_frame", { "bubbles": true, "cancelable": false });
      document.dispatchEvent(evt);
    })
  }

  const addImages = (recognitionService, arrImagesName) => {
    let faceCollection = recognitionService.getFaceCollection(); // use face collection to fill it with known faces
    arrImagesName.forEach(function (imageName) {
      let path_to_image = `./images/${imageName}.jpg`;
      let nameImgEncoded = encodeURIComponent(imageName);
      faceCollection.add(path_to_image, nameImgEncoded).then(response => {
        console.log(`Image ${path_to_image} added!`);
      }).catch(error => {
        console.error(`ERROR: Oops! There is problem in uploading image ${error}.`);
        console.error(error);
      });
    });
  }

  // CompreFace init
  let core = new CompreFace(server, port);
  console.log(core);
  let recognitionService = core.initFaceRecognitionService(recognition_key); // initialize service
  let arrImagesName = ['personA01', 'personB01'];
  addImages(recognitionService, arrImagesName);
  // end of CompreFace init

  return (
    <div className="App">
      <header className="App-header">
        <video ref={videoTag} width="640" height="480" autoPlay muted></video>
        <canvas ref={canvas1} width="640" id="canvas" height="480" style={{ display: 'none' }}></canvas>
        <canvas ref={canvas2} width="640" id="canvas2" height="480" style={{ position: 'absolute' }}></canvas>
        <canvas ref={canvas3} width="640" height="480" style={{ position: 'absolute' }}></canvas>
        <div>
          <button onClick={handleVideoStart}>Start video</button>
        </div>
      </header>
    </div>
  );
}

export default App;
./package.json
{
  "name": "webcam-tutorial",
  "version": "0.1.0",
  "private": true,
  "dependencies": {
    "@exadel/compreface-js-sdk": "^0.6.0",
    "compreface-javascript-sdk": "^2.3.2",
    "react": "^17.0.2",
    "react-dom": "^17.0.2",
    "react-scripts": "^4.0.3",
    "react-terminal-logger": "^1.3.8",
    "web-vitals": "^2.1.0",
    "@testing-library/jest-dom": "^5.11.4",
    "@testing-library/react": "^11.1.0",
    "@testing-library/user-event": "^12.1.10"
  },
  "scripts": {
    "start": "react-scripts start",
    "build": "react-scripts build",
    "test": "react-scripts test",
    "eject": "react-scripts eject"
  },
  "eslintConfig": {
    "extends": [
      "react-app",
      "react-app/jest"
    ]
  },
  "browserslist": {
    "production": [
      ">0.2%",
      "not dead",
      "not op_mini all"
    ],
    "development": [
      "last 1 chrome version",
      "last 1 firefox version",
      "last 1 safari version"
    ]
  }
}
You cannot pass an image path directly in a web application without user interaction (uploading the image) because of security concerns. It is only possible on a backend, with Node.js for example. In your case you should add an input field to upload your image, attach an event listener to take its value, and then pass it to the SDK.
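A minimal sketch of that approach (illustrative only: the input element, handler name, and subject naming are placeholders, and it assumes the browser build of the SDK accepts the uploaded File/Blob in faceCollection.add the same way detection_service.detect accepts the webcam frame blob above):

```js
// Sketch: let the user pick a file and pass the File/Blob to the SDK
// instead of a filesystem path. Assumption: faceCollection.add() accepts
// a Blob/File when running in the browser.
import { CompreFace } from 'compreface-javascript-sdk';

const core = new CompreFace("http://10.0.0.11", 8000);
const recognitionService = core.initFaceRecognitionService("my_recognition_key");
const faceCollection = recognitionService.getFaceCollection();

// Wired up in JSX as: <input type="file" accept="image/*" onChange={handleFileUpload} />
function handleFileUpload(event) {
  const file = event.target.files[0];                                   // File chosen by the user
  if (!file) return;
  const subject = encodeURIComponent(file.name.replace(/\.[^.]+$/, '')); // e.g. "personA01"
  faceCollection.add(file, subject)                                     // pass the Blob, not a path
    .then(() => console.log(`Image ${file.name} added!`))
    .catch(error => console.error(error));
}
```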
Describe the bug
I can't add face images to the face collection. I get the following error:
TypeError: fs__WEBPACK_IMPORTED_MODULE_1___default.a.createReadStream is not a function
I tested versions 0.5.2 and 0.6.0.
Additional context
I checked line 84 of common_endpoints.js (in the upload_path function), and it contains the following command: https://github.com/exadel-inc/compreface-javascript-sdk/blob/52260bd5444c040f7aaf3369cb64e4b2a37605d4/endpoints/common_endpoints.js#L84
Can you help me avoid this possible bug? Thanks!
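For reference, a rough paraphrase (not the SDK's exact code) of what an upload-by-path helper has to do, and why it can only run under Node.js: webpack replaces the fs module with an empty stub in browser bundles, so fs.createReadStream does not exist there and the call fails with the error above.

```js
// Rough paraphrase of an upload-by-path helper (not the SDK's exact code):
// stream the image from disk and append it to multipart form data.
// fs.createReadStream only exists in Node.js; in a browser bundle webpack
// stubs out fs, which is why it reports
// "fs__WEBPACK_IMPORTED_MODULE_1___default.a.createReadStream is not a function".
const fs = require('fs');
const FormData = require('form-data'); // the npm form-data package, Node-side

function buildUploadForm(image_path) {
  const form = new FormData();
  form.append('file', fs.createReadStream(image_path)); // Node-only filesystem API
  return form;
}

module.exports = { buildUploadForm };
```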