// canvas size constants
const CANVAS_WIDTH = 640 * 2;
const CANVAS_HEIGHT = 480;

let video_feed; // webcam video feed
let handpose;   // ml5 handpose model
let hand = [];  // latest results from hand pose detection
function setup() {
  // create the canvas
  createCanvas(CANVAS_WIDTH, CANVAS_HEIGHT);

  // store the webcam feed in the video_feed variable
  video_feed = createCapture(VIDEO);
  // video_feed.size(CANVAS_WIDTH, CANVAS_HEIGHT);

  // hide the raw <video> element so the feed isn't duplicated below the canvas
  video_feed.hide();

  // load the handpose model from ml5; modelLoaded runs once it is ready
  handpose = ml5.handpose(video_feed, modelLoaded);

  // store the latest array of predictions whenever the 'hand' event fires
  handpose.on('hand', results => {
    hand = results;
  });
}
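// For reference: each element of `hand` is one prediction object from the ml5 handpose
// model. Judging only from how it is used below (the sketch touches no other fields),
// the parts we rely on look roughly like:
//
//   {
//     landmarks: [ [x, y, z], ... ],   // 21 keypoints, in video pixel coordinates
//     boundingBox: {
//       topLeft: [x, y],
//       bottomRight: [x, y]
//     }
//   }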
function draw() {
  // setting background color
  background(255);

  // scaling video to avoid distortion
  let aspectRatio = video_feed.height / video_feed.width;

  // mirroring feed from the webcam
  // -- beginning of transformation
  push();
  translate(width, 0);
  scale(-1, 1);
  image(video_feed, width / 2, 0, width / 2, (width / 2) * aspectRatio);
  drawKeypoints();
  drawBoundingBox();
  pop();
  // -- end of transformation
}
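// How the mirroring above works: translate(width, 0) followed by scale(-1, 1) maps a
// drawn x coordinate to (width - x), so everything inside push()/pop() is flipped
// horizontally. Assuming the default 640x480 capture size, the flipped video fills the
// left half of the double-width canvas, while the keypoints and bounding box (drawn in
// raw video pixel coordinates) land on the right half.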
function modelLoaded() {
  print("Model loaded");
}
// A function to draw ellipses over the detected keypoints
function drawKeypoints() {
  for (let i = 0; i < hand.length; i += 1) {
    const prediction = hand[i];
    for (let j = 0; j < prediction.landmarks.length; j += 1) {
      const keypoint = prediction.landmarks[j]; // a single [x, y, z] landmark
      fill(0, 255, 0);
      noStroke();
      ellipse(keypoint[0], keypoint[1], 10, 10);
      // log the keypoint's z (depth) value to the console
      print(keypoint[2]);
    }
  }
}
// A function to draw the bounding box of the detected hand
function drawBoundingBox() {
  for (let i = 0; i < hand.length; i++) {
    const prediction = hand[i];
    let x = prediction.boundingBox.topLeft[0];
    let y = prediction.boundingBox.topLeft[1];
    let w = prediction.boundingBox.bottomRight[0] - prediction.boundingBox.topLeft[0];
    let h = prediction.boundingBox.bottomRight[1] - prediction.boundingBox.topLeft[1];
    noFill();
    stroke('pink');
    rect(x, y, w, h);
  }
}
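// Note (assumption, not part of the original sketch): the calls above target the ml5 0.x
// handpose API, i.e. ml5.handpose(video, callback) plus the 'hand' event. Newer ml5
// releases reworked the hand-tracking API, so the ml5 script tag loaded in index.html
// should point at a 0.x build for this sketch to run as written.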