// Handpose model, webcam feed, and raw predictions
let handpose;
let video;
let predictions = [];

// Fingertip positions reported by Handpose
let thumb, indexFinger;

// The ml5 neural network and its latest classification result
let myNN, myNnResults;

// Configure the neural network as a classifier
const options = {
  task: "classification",
  debug: true,
};
function setup() {
  createCanvas(640, 480);
  video = createCapture(VIDEO);
  video.size(width, height);
  textSize(24);

  myNN = ml5.neuralNetwork(options);
  handpose = ml5.handpose(video, modelReady);

  // This sets up an event that fills the global variable "predictions"
  // with an array every time new hand poses are detected
  handpose.on("predict", (results) => {
    predictions = results;
    if (results[0]) {
      // console.log(results[0]);
      thumb = results[0].annotations.thumb[3];
      indexFinger = results[0].annotations.indexFinger[3];
    }
  });

  // Hide the video element, and just show the canvas
  video.hide();
}
function keyPressed() {
  // Ignore key presses until Handpose has reported at least one hand
  if (!thumb || !indexFinger) return;

  if (keyCode === 79) {
    // 'O' adds an example labeled "open"
    myNN.addData(
      [thumb[0], thumb[1], indexFinger[0], indexFinger[1]],
      ["open"]
    );
  } else if (keyCode === 67) {
    // 'C' adds an example labeled "close"
    myNN.addData(
      [thumb[0], thumb[1], indexFinger[0], indexFinger[1]],
      ["close"]
    );
  } else if (keyCode === 84) {
    // 'T' normalizes the data and starts training
    myNN.normalizeData();
    myNN.train({ epochs: 50 }, trainingDone);
  }
}
function trainingDone() {
  // Once training finishes, kick off the first classification
  myNN.classify(
    [thumb[0], thumb[1], indexFinger[0], indexFinger[1]],
    gotResults
  );
}
function gotResults(error, nnResults) {
  if (error) console.error(error);
  if (nnResults) {
    myNnResults = nnResults;
    // Classify again with the latest fingertip positions so the
    // label keeps updating as the hand moves
    myNN.classify(
      [thumb[0], thumb[1], indexFinger[0], indexFinger[1]],
      gotResults
    );
  }
}
function modelReady() {
  console.log("Model ready!");
}
function draw() {
  image(video, 0, 0, width, height);
  // Draw the tracked fingertips on top of the video
  drawKeypoints();
  // Show the current label and confidence once classification has started
  if (myNnResults) {
    text(
      myNnResults[0].label + " " + myNnResults[0].confidence.toFixed(2),
      width / 2,
      height / 2
    );
  }
}
// A function to draw ellipses over the detected keypoints
function drawKeypoints() {
  if (thumb && indexFinger) {
    fill(255, 255, 0);
    ellipse(thumb[0], thumb[1], 30, 30);
    fill(255, 0, 0);
    ellipse(indexFinger[0], indexFinger[1], 30, 30);
  }
}