let video_feed;     // webcam video feed
let videoScale = 5; // factor used to shrink the video preview
let handpose;       // ml5 handpose model
let hand = [];      // latest results from hand pose detection
let points = [];    // points created from keypoints; grows every frame, leaving a trail
function setup() {
  // create a full-window canvas
  createCanvas(windowWidth, windowHeight);
  // store the webcam feed in the video_feed variable
  video_feed = createCapture(VIDEO);
  // video_feed.size(CANVAS_WIDTH, CANVAS_HEIGHT);
  // hide the raw feed so it is not drawn twice on the canvas
  video_feed.hide();
  // load the handpose model from ml5
  handpose = ml5.handpose(video_feed, modelLoaded);
  handpose.on('hand', results => {
    hand = results;
  });
}
function draw() {
  // clear the background each frame
  background(255);
  // compute the aspect ratio so the scaled video is not distorted
  let aspectRatio = video_feed.height / video_feed.width;
  // mirror the feed from the webcam
  // -- beginning of transformation
  push();
  translate(width, 0);
  scale(-1, 1);
  loadKeyPoints();
  drawKeyPoints();
  image(video_feed, width - (width / videoScale), 0, width / videoScale, (width / videoScale) * aspectRatio);
  pop();
  // -- end of transformation
}
// resize the canvas when the window size changes
function windowResized() {
  resizeCanvas(windowWidth, windowHeight);
}

function modelLoaded() {
  print("Model loaded");
}
// create Point objects for the detected keypoints
function loadKeyPoints() {
  for (let i = 0; i < hand.length; i += 1) {
    const prediction = hand[i];
    for (let j = 0; j < prediction.landmarks.length; j += 1) {
      const keypoint = prediction.landmarks[j];
      // map the keypoint from video coordinates to canvas coordinates
      let x = map(keypoint[0], 0, video_feed.width, 0, width);
      let y = map(keypoint[1], 0, video_feed.height, 0, height);
      points.push(new Point(x, y, keypoint[2], prediction.handInViewConfidence));
    }
  }
}
// draw every point collected so far
function drawKeyPoints() {
  for (const point of points) {
    point.draw();
  }
}
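
// NOTE: the Point class used above is not defined in this listing. Below is a
// minimal sketch of what it might look like, assuming each point simply draws
// itself as a small circle faded by the detection confidence. The constructor
// arguments mirror the call in loadKeyPoints(); the styling choices here are
// assumptions, not the author's original implementation.
class Point {
  constructor(x, y, z, confidence) {
    this.x = x;                   // canvas x position
    this.y = y;                   // canvas y position
    this.z = z;                   // relative depth reported by handpose
    this.confidence = confidence; // handInViewConfidence of the prediction
  }

  draw() {
    noStroke();
    // fade points with a low detection confidence (roughly 0..1)
    fill(0, 0, 0, this.confidence * 255);
    circle(this.x, this.y, 8);
  }
}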