/*
* 👋 Hello! This is an ml5.js example made and shared with ❤️.
* Learn more about the ml5.js project: https://ml5js.org/
* ml5.js license and Code of Conduct: https://github.com/ml5js/ml5-next-gen/blob/main/LICENSE.md
*
* This example demonstrates drawing skeletons on poses for the MoveNet model.
*/
// The phrase scattered across the body: kept both as one string and
// pre-split into words (draw() indexes into the word array).
let textResponse = "like a sparrow in the rain"; // NOTE(review): unused in this chunk — verify other files
let textResponseArr = ["like","a","sparrow","in","the","rain"];
// Poses from the previous frame, used to measure per-keypoint motion in draw().
let prevPoses = [];
let videoScale = 1; // NOTE(review): unused in this chunk — confirm before removing
let textOffset = 0; // rotates which word each keypoint shows (increment in draw() is commented out)
let video; // p5 webcam capture element (created hidden in setup)
let bodyPose; // ml5 bodyPose model instance (loaded in preload)
let poses = []; // latest detection results, replaced wholesale by gotPoses()
let connections; // skeleton keypoint index pairs from bodyPose.getSkeleton()
// p5 lifecycle hook: runs before setup(), and setup() is deferred until
// everything loaded here is ready.
function preload() {
// Load the bodyPose model
bodyPose = ml5.bodyPose();
}
function setup() {
  // Size the canvas to the window width while preserving the camera's
  // 4:3 (640x480) aspect ratio.
  const aspect = 640 / 480;
  createCanvas(windowWidth, windowWidth / aspect);

  // Create the webcam capture and hide the raw <video> element —
  // the sketch draws its own visuals rather than the video feed.
  video = createCapture(VIDEO);
  video.size(640, 480);
  video.hide();

  // Start from a black canvas; draw() fades it each frame.
  background(0);

  // Use degrees so sin(frameCount / 100) in draw() behaves as authored.
  angleMode(DEGREES);

  // Start continuous pose detection; gotPoses receives each result set.
  bodyPose.detectStart(video, gotPoses);

  // Cache the keypoint index pairs that make up the skeleton.
  connections = bodyPose.getSkeleton();
}
function draw() {
  // Translucent black wash instead of a full clear: old frames fade out
  // gradually, leaving motion trails behind the text.
  background(0, 0, 0, 10);

  // For each detected pose, draw a word at every confident keypoint that
  // moved noticeably since the previous frame.
  for (let i = 0; i < poses.length; i++) {
    const pose = poses[i];
    const prevPose = prevPoses[i];
    // Pose counts can change between frames; without a matching previous
    // pose at this index there is no motion to measure (the old
    // prevPoses.length guard crashed here when a new person appeared).
    if (!prevPose) continue;

    for (let j = 0; j < pose.keypoints.length; j++) {
      const keypoint = pose.keypoints[j];
      const prevKeypoint = prevPose.keypoints[j];

      // Skip keypoints the model is not reasonably confident about.
      if (keypoint.confidence <= 0.1) continue;

      // Red channel pulses over time; sin() is in degrees (set in setup),
      // and negative values clamp to 0.
      fill(sin(frameCount / 100) * 255, 255, 255);
      noStroke();
      textSize(18);

      // Distance this keypoint travelled since the last frame.
      const motion = createVector(
        prevKeypoint.x - keypoint.x,
        prevKeypoint.y - keypoint.y
      );
      const distanceToPreviousKeypoint = motion.mag();

      push();
      translate(keypoint.x, keypoint.y);
      // Each keypoint shows a different word of the phrase; textOffset
      // would rotate the mapping over time if its increment is re-enabled.
      const oneWord =
        textResponseArr[(j + textOffset) % textResponseArr.length];
      // Only render when the joint actually moved (> 10 px), so still
      // joints don't pile text onto one spot.
      if (distanceToPreviousKeypoint > 10) {
        text(oneWord, 0, 0);
      }
      pop();
    }
  }

  // Remember this frame's poses for next frame's motion comparison.
  // gotPoses assigns a fresh array, so holding this reference is safe.
  // (Matches original behavior: prevPoses is untouched when no poses.)
  if (poses.length > 0) prevPoses = poses;
}
// Callback function for when bodyPose outputs data
// `results` is an array with one entry per detected person; draw() reads
// each entry's `keypoints` array of {x, y, confidence} points.
function gotPoses(results) {
// Save the output to the poses variable
poses = results;
}