// (removed stray paste artifact "xxxxxxxxxx" / "309" — it was not valid JavaScript)
// This sketch implements recording and layering of movement data:
// live webcam poses are annotated with words from the text input, and
// past "recordings" of poses are replayed on top of the live view.
let textInput; // p5 input element, created in setup()
let recordingLength = 10000; // length of one recording window, in ms (10 s)
let recordings = []; // finished recordings, replayed by drawRecordings()
// The recording currently being captured; rolled over in saveRecording().
let newRecording = {
  phrase: "like a sparrow in the wind",
  posesAndTimes: [], // { timeSinceStart, pose } samples
  phraseColor: {
    // random color per recording so layered phrases are distinguishable
    r: Math.random() * 255,
    g: Math.random() * 255,
    b: Math.random() * 255,
  },
};
// NOTE(review): starts 10 ms in the future — presumably to delay the first
// rollover slightly; confirm the offset is intentional.
let recordingStart = Date.now() + 10;
let keypointsToUse = ["nose", "left_wrist", "right_wrist"]; // only these get words
let recordingPlaybackLoopStart = 0; // epoch-ms origin of the playback loop
let textResponse = "like a sparrow in the rain"; // NOTE(review): appears unused
let textResponseArr = ["like", "a", "sparrow", "in", "the", "rain"]; // shadowed by locals below
let prevPoses = []; // previous frame's poses, used to measure keypoint movement
let videoScale = 1; // NOTE(review): appears unused in this file
let textOffset = 0; // rotates which word of the phrase lands on which keypoint
let video; // p5 capture element (webcam)
let bodyPose; // ml5 BlazePose model, loaded in preload()
let poses = []; // latest detection results from gotPoses()
let connections; // skeleton connection index pairs from bodyPose.getSkeleton()
function preload() {
  // Load the BlazePose model: mirrored to match the flipped webcam view,
  // with segmentation enabled and smoothed between frames.
  const poseOptions = {
    flipped: true,
    enableSegmentation: true,
    smoothSegmentation: true,
  };
  bodyPose = ml5.bodyPose("BlazePose", poseOptions);
}
function setup() {
  // Build the UI: a prompt label and the phrase input used for new recordings.
  // (The original stored the label div and a 640/480 aspect ratio in locals
  // that were never used; both unused locals are removed.)
  createDiv("How does AI make you feel in your body?");
  textInput = createInput("like a sparrow in the wind");
  createCanvas(window.innerWidth, window.innerHeight);
  // Create the webcam capture and hide the raw element; frames are drawn
  // manually in drawWebcamVideo() and fed to bodyPose below.
  video = createCapture(VIDEO);
  video.size(640, 480);
  video.hide();
  background(0);
  // Use degrees for any angle math.
  angleMode(DEGREES);
  // Start detecting poses in the webcam video.
  bodyPose.detectStart(video, gotPoses);
  // Cache the skeleton connection (keypoint index pair) information.
  connections = bodyPose.getSkeleton();
}
function drawSkeletons() {
  // Tile the canvas with a numX x numY grid of miniature skeletons, each a
  // scaled-down copy of the currently detected poses.
  let numX = 10;
  let numY = 10;
  // Pose coordinates are in 640x480 video space; scale up to the canvas.
  let scaleFactorX = width / 640;
  let scaleFactorY = height / 480;
  push();
  scale(scaleFactorX, scaleFactorY);
  // Stroke settings are loop-invariant; hoisted out of the innermost loop.
  // (A redundant no-op push()/pop() pair from the original is also removed.)
  stroke(255, 255, 255);
  strokeWeight(2);
  // NOTE(review): translate() uses canvas width/height inside a space already
  // scaled to the canvas, so tiles may overshoot the visible area — confirm
  // whether 640/480 was intended here.
  for (let q = 0; q < numX; q++) {
    for (let w = 0; w < numY; w++) {
      push();
      translate(q * (width / numX), w * (height / numY));
      scale(1 / numX, 1 / numY);
      // Draw the skeleton connections of every detected pose.
      for (let i = 0; i < poses.length; i++) {
        let pose = poses[i];
        for (let j = 0; j < connections.length; j++) {
          let pointA = pose.keypoints[connections[j][0]];
          let pointB = pose.keypoints[connections[j][1]];
          // Only draw a line if both endpoints are confident enough.
          if (pointA.confidence > 0.1 && pointB.confidence > 0.1) {
            line(pointA.x, pointA.y, pointB.x, pointB.y);
          }
        }
      }
      pop();
    }
  }
  pop();
}
function showDebugInfo() {
  // Overlay the seconds elapsed in the current recording window over the
  // total window length, top-left of the canvas.
  const elapsedSec = Math.round((Date.now() - recordingStart) / 1000);
  push();
  fill(0, 0, 0);
  rect(0, 0, 200, 30); // black backdrop keeps the text readable
  fill(255, 255, 255);
  textSize(18);
  text(`recording time: ${elapsedSec}/ ${recordingLength / 1000}`, 20, 20);
  pop();
}
function drawRecordings() {
  // Replay each saved recording: find the sample pair that brackets "now"
  // within the playback loop and draw one word of the recording's phrase at
  // each tracked keypoint that moved noticeably since the previous sample.
  // (Dead code from the original — an unused heading angle and an unused
  // phrase fragment — is removed; behavior is unchanged.)
  let scaleFactorX = width / 640;
  let scaleFactorY = height / 480;
  push();
  // Pose coordinates are in 640x480 video space; scale up to the canvas.
  scale(scaleFactorX, scaleFactorY);
  let now = Date.now();
  for (let i = 0; i < recordings.length; i++) {
    let recording = recordings[i];
    let samples = recording.posesAndTimes;
    // Start at 1 (q-1 is read) and stop before the last entry (q+1 is read).
    for (let q = 1; q < samples.length - 1; q++) {
      let startsBeforeNow =
        recordingPlaybackLoopStart + samples[q].timeSinceStart < now;
      let endsAfterNow =
        recordingPlaybackLoopStart + samples[q + 1].timeSinceStart > now;
      if (!startsBeforeNow || !endsAfterNow) continue;
      let pose = samples[q].pose;
      let prevPose = samples[q - 1].pose;
      let textColor = recording.phraseColor;
      // Splitting the phrase is loop-invariant; hoisted out of the keypoint loop.
      let words = recording.phrase.split(" ");
      for (let j = 0; j < pose.keypoints.length; j++) {
        let keypoint = pose.keypoints[j];
        if (!keypointsToUse.includes(keypoint.name)) continue;
        // Only draw if the keypoint's confidence is bigger than 0.1.
        if (keypoint.confidence <= 0.1) continue;
        let prevKeypoint = prevPose.keypoints[j];
        let movement = createVector(
          prevKeypoint.x - keypoint.x,
          prevKeypoint.y - keypoint.y
        ).mag();
        // Only draw a word when the keypoint moved since the previous sample,
        // so motionless poses don't spam text.
        if (movement > 10) {
          push();
          noStroke();
          textSize(18);
          translate(keypoint.x, keypoint.y);
          fill(textColor.r, textColor.g, textColor.b);
          text(words[(j + textOffset) % words.length], 0, 0);
          pop();
        }
      }
    }
  }
  pop();
}
function drawCurrentPoses() {
  // Draw a word from the live text input at each tracked keypoint of the
  // current poses. Fixes from the original:
  //  - `prevPoses = poses` was reassigned once per pose inside the loop and
  //    never updated on an empty frame; it now runs exactly once per call.
  //  - a pose with no counterpart in prevPoses crashed on
  //    `prevPose.keypoints`; such poses are now skipped.
  //  - unused movement/angle/phrase-fragment dead code is removed (the
  //    distance gate was already commented out).
  let scaleFactorX = width / 640;
  let scaleFactorY = height / 480;
  push();
  // Pose coordinates are in 640x480 video space; scale up to the canvas.
  scale(scaleFactorX, scaleFactorY);
  if (prevPoses.length) {
    // Splitting the input phrase is invariant for the whole frame.
    let words = textInput.value().split(" ");
    for (let i = 0; i < poses.length; i++) {
      let pose = poses[i];
      let prevPose = prevPoses[i];
      // A pose with no previous-frame counterpart can't be compared; skip it.
      if (!prevPose) continue;
      for (let j = 0; j < pose.keypoints.length; j++) {
        let keypoint = pose.keypoints[j];
        if (!keypointsToUse.includes(keypoint.name)) continue;
        if (keypoint.confidence <= 0.5) continue;
        noStroke();
        textSize(18);
        push();
        translate(keypoint.x, keypoint.y);
        text(words[(j + textOffset) % words.length], 0, 0);
        pop();
      }
    }
  }
  // Remember this frame's poses for next frame's comparison.
  prevPoses = poses;
  pop();
}
function drawWebcamVideo() {
  // Paint the webcam frame mirrored horizontally, at very low opacity so that
  // successive frames accumulate into a ghostly trail.
  push();
  translate(width, 0);
  const mirror = -1; // flip horizontally around the canvas' vertical axis
  scale(mirror, 1);
  tint(255, 5); // near-transparent alpha
  image(video, 0, 0, width, height);
  pop();
}
function draw() {
  // Webcam ghost first, then a translucent black wash (leaves motion trails),
  // then the overlay layers in back-to-front order.
  drawWebcamVideo();
  background(0, 0, 0, 10);
  const layers = [showDebugInfo, drawSkeletons, drawRecordings, drawCurrentPoses];
  for (const layer of layers) {
    layer();
  }
}
// ml5 callback: fired every time bodyPose produces a fresh set of poses.
function gotPoses(detected) {
  poses = detected; // publish the latest poses for the draw loop
  saveRecording(); // fold them into the active recording
}
function saveRecording() {
  // Append the current frame's poses to the in-progress recording, rolling
  // over to a fresh recording once `recordingLength` ms have elapsed.
  if (poses.length === 0) return;
  let now = Date.now();
  // Rollover check hoisted out of the per-pose loop: the original re-tested
  // it for every pose in the same frame (harmless but redundant).
  if (now - recordingStart > recordingLength) {
    // Keep only recordings with enough samples to be worth replaying.
    if (newRecording.posesAndTimes.length > 10) {
      recordings.push(newRecording);
    }
    // Start a new recording using the current input phrase and a fresh
    // random color so layered phrases are distinguishable.
    newRecording = {
      phrase: textInput.value(),
      posesAndTimes: [],
      phraseColor: {
        r: Math.random() * 255,
        g: Math.random() * 255,
        b: Math.random() * 255,
      },
    };
    recordingStart = now;
    recordingPlaybackLoopStart = now;
    console.log(recordings);
  }
  for (let i = 0; i < poses.length; i++) {
    // Use the same `now` for every pose in this frame (the original called
    // Date.now() a second time for the timestamp).
    newRecording.posesAndTimes.push({
      timeSinceStart: now - recordingStart,
      pose: poses[i],
    });
  }
}