Requires HTML and JavaScript (TypeScript optional).
Below is index.html
<!DOCTYPE html>
<script>
// Busy-waits (synchronously blocks the main thread) for the given number of
// milliseconds. NOTE(review): while this spins, the page cannot render or
// respond to input — prefer setTimeout/async delays unless blocking the
// parser is intentional (it appears to be, here).
// @param {number} delayMs - how long to block, in milliseconds.
function delay(delayMs) {
  // Compute the deadline once instead of mutating the parameter.
  const deadline = Date.now() + delayMs;
  while (Date.now() < deadline) {
    // spin
  }
}
// Deliberately blocks parsing/rendering for 5 seconds before the rest of the
// document loads.
delay(5000);
</script>
<html>
<head>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/3.2.1/jquery.min.js"></script>
</head>
<body>
<!-- Counter display: index.js writes an incrementing number here whenever
     BlazeFace detection confidence drops below 99.7%. It must live inside
     <body> (the original placed it after </body>, which is invalid HTML). -->
<div id="uniqid">
</div>
</body>
</html>
<html>
<head>
<!-- TensorFlow.js + BlazeFace power the bounding-box tracker in index.js -->
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/blazeface"></script>
<!-- MediaPipe libraries required by the face-mesh module script below:
     they provide FaceMesh, Camera, drawConnectors and the FACEMESH_*
     constants, none of which were loaded by any script tag before. -->
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils/camera_utils.js"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils/drawing_utils.js"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/face_mesh.js"></script>
<meta charset="utf-8">
<link rel="stylesheet" href="main.css">
<title>Tensor track</title>
</head>
<body>
<div class="background">
<div class="blur">
<!-- fixed: the original closed this <h2> with a mismatched </h3> tag -->
<h2 id="heading"></h2>
<div id="videoContainer">
<!-- Hidden camera feed; index.js mirrors frames onto the canvas below -->
<video id="webcam" width="1000" height="1000" autoplay style="display:none"></video>
<canvas id="canvas" width="640" height="480"></canvas>
</div>
<script src="index.js" type="text/javascript"></script>
</div>
</div>
</body>
</html>
<script type="module">
// Grab the webcam <video> and overlay <canvas> declared in the markup above.
// NOTE(review): the original queried classes `input_video` / `output_canvas`,
// which exist nowhere in this document, so `canvasElement` was undefined and
// `.getContext('2d')` threw. The actual elements carry the ids below.
const videoElement = document.getElementById('webcam');
const canvasElement = document.getElementById('canvas');
const canvasCtx = canvasElement.getContext('2d');
// FaceMesh render callback: paints the current camera frame onto the canvas,
// then strokes the landmark connector sets for every detected face.
function onResults(results) {
  canvasCtx.save();
  canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
  canvasCtx.drawImage(
      results.image, 0, 0, canvasElement.width, canvasElement.height);

  const faces = results.multiFaceLandmarks;
  if (faces) {
    // Connector set → stroke style, drawn in this exact order.
    const overlays = [
      [FACEMESH_TESSELATION, {color: '#C0C0C070', lineWidth: 1}],
      [FACEMESH_RIGHT_EYE, {color: '#FF3030'}],
      [FACEMESH_RIGHT_EYEBROW, {color: '#FF3030'}],
      [FACEMESH_RIGHT_IRIS, {color: '#FF3030'}],
      [FACEMESH_LEFT_EYE, {color: '#30FF30'}],
      [FACEMESH_LEFT_EYEBROW, {color: '#30FF30'}],
      [FACEMESH_LEFT_IRIS, {color: '#30FF30'}],
      [FACEMESH_FACE_OVAL, {color: 'blue'}],
      [FACEMESH_LIPS, {color: '#E0E0E0'}],
    ];
    for (const landmarks of faces) {
      for (const [connections, style] of overlays) {
        drawConnectors(canvasCtx, landmarks, connections, style);
      }
    }
  }
  canvasCtx.restore();
}
// Construct the FaceMesh solution; its model/wasm assets are fetched on
// demand from the jsDelivr CDN.
const faceMesh = new FaceMesh({locateFile: (file) => {
return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
}});
// Track a single face; refineLandmarks enables the iris/eye-refinement model
// (required for the FACEMESH_*_IRIS connectors drawn in onResults).
faceMesh.setOptions({
maxNumFaces: 1,
refineLandmarks: true,
minDetectionConfidence: 0.5,
minTrackingConfidence: 0.5
});
// Repaint the canvas (see onResults above) for every processed frame.
faceMesh.onResults(onResults);
// Pump webcam frames into FaceMesh; each send() eventually fires onResults.
// NOTE(review): `Camera` comes from @mediapipe/camera_utils, which no
// <script> tag in this document loads — confirm it is included.
const camera = new Camera(videoElement, {
onFrame: async () => {
await faceMesh.send({image: videoElement});
},
width: 1280,
height: 720
});
camera.start();
</script>
Below is index.js:
// Counter rendered into #uniqid; incremented by draw() whenever detection
// confidence drops below 99.7. Starts at 1.
let a = 1;
// BlazeFace model instance, lazily loaded on the first draw() call.
let model;
(function() {
  // Render targets declared in index.html.
  const canvas = document.getElementById('canvas');
  const context = canvas.getContext('2d');
  const video = document.getElementById('webcam');

  // Promise-based camera access. The prefixed navigator.getUserMedia
  // variants used originally are deprecated and removed from current
  // browsers; mediaDevices.getUserMedia is the standard replacement.
  navigator.mediaDevices.getUserMedia({ video: true, audio: false })
    .then((stream) => {
      video.srcObject = stream;
      video.play();
    })
    .catch((error) => {
      // The original swallowed this silently; surface it for debugging.
      console.error('Webcam access failed:', error);
    });

  video.addEventListener('play', function() {
    draw(this, context, 640, 480);
  }, false);

  // Draws the current video frame, runs BlazeFace detection, and overlays a
  // bounding box plus confidence label for each detected face. Re-schedules
  // itself every 250 ms.
  async function draw(video, context, width, height) {
    context.drawImage(video, 0, 0, width, height);
    if (!model) model = await blazeface.load(); // load once, lazily
    const returnTensors = false; // plain JS arrays, not tf.Tensors
    const predictions = await model.estimateFaces(video, returnTensors);

    for (const prediction of predictions) {
      const [x0, y0] = prediction.topLeft;
      const [x1, y1] = prediction.bottomRight;

      // Bounding box (lineWidth is numeric; the original assigned "4").
      context.beginPath();
      context.strokeStyle = 'black';
      context.lineWidth = 4;
      context.rect(x0, y0, x1 - x0, y1 - y0);
      context.stroke();

      // Confidence as a percentage. Compare numerically — the original
      // compared a *string* against 99.7 and only worked via coercion.
      const probPct = prediction.probability[0] * 100;
      if (probPct < 99.7) {
        document.getElementById('uniqid').innerHTML = ++a;
      }

      // Label, e.g. "99.812++" (original "++" suffix kept; possibly meant "%"
      // — TODO confirm intent).
      context.fillStyle = 'white';
      context.font = '14pt sans-serif';
      context.fillText(probPct.toPrecision(5) + '++', x0 + 5, y0 + 20);
    }

    setTimeout(draw, 250, video, context, width, height);
  }
})();
Dheirya_Tyagi_CEO ~ May 23
Cool! I actually tried to make my own face-tracker (it isn't that good): https://srcamera.netlify.app