// Hand-pose theremin sketch: p5.js + ml5.js (handpose, neuralNetwork regression, Teachable Machine sound commands).
// Variables: ====================================================================================================
// HandPose Model and video
let handpose;
let video;
let hands = []; // latest handpose predictions array
let thumbX = 0, thumbY = 0; // thumb fingertip position (pixels)
let indexX = 640, indexY = 480; // index fingertip position; starts at the far corner so the initial pinch distance is large
let midX, midY; // midpoint between thumb and index fingertips
let pinch; // current thumb-to-index distance in pixels
let pinchBool; // true only on the frame a fresh pinch is detected (edge-triggered in draw)
// The ML5 Network
let brain; // ml5 regression network mapping (midX, midY) -> frequency
let state = "collection"; // pipeline stage: "collection" -> "training" -> "prediction"
let TrainedModelFrequency; // most recent frequency predicted by the trained model
// These are variables regarding the notes
// Note frequencies in Hz (C-major scale, middle octave).
let notes = {
C: 261.6256,
D: 293.6648,
E: 329.6276,
F: 349.2282,
G: 391.9954,
A: 440.0000,
B: 493.8833
}
let label = 'C'; // note currently being recorded during data collection
let particles = []; // visual burst left behind at each pinch midpoint
// These are variables used for sound generation.
let env, wave; // p5.Envelope and p5.Oscillator pair driven by pinchSounds()
let pinchSoundGate = false; // hysteresis gate: re-arms only after fingers open past 45px
// Teachable Machine Variables:
const TM_URL = 'https://teachablemachine.withgoogle.com/models/VFCmbXm90/'
let NN_TrainingSoundModel; // Teachable Machine sound classifier for voice commands
let bgNoiseBool; // last confident classification was background noise
let collectDataBool; // when true, each pinch adds a training sample
let trainDataBool; // request flag: start training on the next frame
let saveDataBool; // request flag: export the dataset on the next frame
let debuggingBool = false; // NOTE(review): gates ALL sound commands in gotSoundResults — confirm the inverted-sounding name is intentional
// Console Loading Models ==========================================================================================
//Preloads the Teachable Machine sound model into NN_TrainingSoundModel
// p5 preload hook: fetch the Teachable Machine sound model before setup runs.
function preload() {
  const modelURL = TM_URL + 'model.json';
  NN_TrainingSoundModel = ml5.soundClassifier(modelURL, gotSoundModel);
}
//Logs the model onto the console when loaded.
// Callback fired once the sound classifier has loaded; logs it for inspection.
function gotSoundModel() {
  console.log(NN_TrainingSoundModel);
}
// Function to display that the model loaded.
// Callback fired once the handpose model is ready; announces readiness.
function handPoseModelLoaded() {
  console.log("Handpose ready");
}
// SETUP ==========================================================================================================
// p5 setup: canvas, hidden webcam capture, sound chain, handpose tracking,
// the regression network, and the always-listening sound-command classifier.
function setup() {
  createCanvas(640, 480);
  // Start the webcam feed that handpose will track, but keep it off-DOM.
  video = createCapture(VIDEO);
  video.size(640, 480);
  video.hide();
  InitializeSoundSettings();
  // Load the handpose model and subscribe to its prediction stream.
  handpose = ml5.handpose(video, handPoseModelLoaded);
  handpose.on("predict", gotPose);
  // Regression network mapping pinch coordinates to a note frequency.
  brain = ml5.neuralNetwork({ task: "regression", debug: true });
  // Listen for the whistle/snap/pop training commands.
  NN_TrainingSoundModel.classify(gotSoundResults);
}
// Function to set up the settings for the sounds when pinched.
// Builds the oscillator/envelope pair used for pinch notes:
// a 440Hz sine whose amplitude is shaped by a short ADSR envelope.
function InitializeSoundSettings() {
  wave = new p5.Oscillator();
  wave.setType('sine');
  env = new p5.Envelope();
  env.setADSR(0.05, 0.1, 0.5, 1);
  env.setRange(1.2, 0);
  wave.start();
  wave.freq(440);
  wave.amp(env);
}
// Outputting Models ==========================================================================================
// Function used to classify the sounds for training data from Teachable Machine
// Routes Teachable Machine sound classifications to the control flags.
// Labels: 'Background Noise' stops collection, 'Collect' (whistle) starts it,
// 'Train' (finger snap) triggers training, 'Save' (mouth pop) exports data.
// NOTE(review): the whole command handler is gated on debuggingBool, which
// starts false — voice control is inert until it is set; confirm the gate
// (and its debug-sounding name) is intentional.
function gotSoundResults(error, results) {
  // Surface classifier errors instead of continuing with no results.
  if (error) {
    console.error(error);
    return;
  }
  if (!results || !debuggingBool) {
    return;
  }
  // Top (highest-confidence) classification; ignore low-confidence hits
  // to avoid spurious mode switches. (Previously this leaked an implicit
  // global `myResults`; it is no longer needed.)
  const top = results[0];
  if (top.confidence <= 0.9) {
    return;
  }
  switch (top.label) {
    case 'Background Noise': // silence: stop collecting
      bgNoiseBool = true;
      collectDataBool = false;
      console.log("Stopped Collecting.");
      break;
    case 'Collect': // whistle: start collecting samples on each pinch
      bgNoiseBool = false;
      collectDataBool = true;
      break;
    case 'Train': // finger snap: kick off training in draw()
      bgNoiseBool = false;
      trainDataBool = true;
      break;
    case 'Save': // mouth 'pop': export the dataset in draw()
      bgNoiseBool = false;
      saveDataBool = true;
      break;
  }
}
// If there is a hand, we get the coordinates of the thumb and index finger
// Handpose prediction callback: caches the results and, when a hand is
// visible, updates the tracked thumb/index fingertip coordinates.
function gotPose(results) {
  hands = results;
  if (hands.length === 0) {
    return;
  }
  const { thumb, indexFinger } = results[0].annotations;
  thumbX = thumb[3][0];
  thumbY = thumb[3][1];
  indexX = indexFinger[3][0];
  indexY = indexFinger[3][1];
}
// Helper Functions for DRAW =====================================================================================
// Helper function to draw all the keypoints on the handpose model
// Marks the tracked thumb and index fingertips with small green dots.
function drawKeyPoints() {
  noStroke();
  fill(0, 255, 0);
  for (const [x, y] of [[indexX, indexY], [thumbX, thumbY]]) {
    circle(x, y, 10);
  }
}
// This simply draws the grid to separate which notes you can play where.
// Overlays a 3x3 grid (two horizontal + two vertical lines) marking the
// note regions on the canvas.
function drawGrid() {
  fill(0);
  stroke(1);
  strokeWeight(3);
  for (let third = 1; third <= 2; third++) {
    line(0, third * height / 3, width, third * height / 3);
    line(third * width / 3, 0, third * width / 3, height);
  }
}
// Fired once per pinch: spawns a particle at the pinch midpoint, then either
// records a training sample (collection) or plays the predicted frequency
// (prediction), depending on the current pipeline state.
function drawNote() {
  midX = (indexX + thumbX) / 2;
  midY = (indexY + thumbY) / 2;
  particles.push(new Particle(midX, midY));
  const inputs = [midX, midY];
  if (state === "collection") {
    // Play the note being taught; record it when collection is active
    // (toggled by whistling or pressing 'p').
    pinchSounds(notes[label]);
    if (collectDataBool) {
      CollectData(inputs);
    }
  } else if (state === "prediction") {
    brain.predict(inputs, gotResults);
    // Play the most recent prediction, once one has arrived.
    if (TrainedModelFrequency) {
      console.log(TrainedModelFrequency);
      pinchSounds(TrainedModelFrequency);
    }
  }
}
// Retunes the oscillator to the given frequency and fires the envelope once.
function pinchSounds(frequency) {
  wave.freq(frequency);
  env.play();
}
// DRAW FUNCTION ============================================================================================
// Main render loop: draws the video, grid and fingertips, runs the pinch
// edge-detector, honours pending voice-command flags, and animates the
// particle bursts left behind by pinches.
function draw() {
  // Draw the webcam video
  image(video, 0, 0, width, height);
  drawGrid();
  drawKeyPoints();
  // Calculating the distance of the finger coordinates in real time.
  pinch = dist(indexX, indexY, thumbX, thumbY);
  // Hysteresis: the fingers must open past 45px to re-arm the gate, so a
  // held pinch fires only one note.
  if(pinch > 45) {pinchSoundGate = true; }
  if(pinchSoundGate) {
    if(pinch < 45) {pinchBool = true; }
    else {pinchBool = false; }
  }
  // Edge-triggered: consume the gate and play/record exactly once per pinch.
  if(pinchBool) {
    pinchSoundGate = false;
    pinchBool = false;
    drawNote();
  }
  // Flags set by gotSoundResults() or the keyPressed() backups.
  if(trainDataBool) {
    state = "training";
    TrainData();
  }
  if(saveDataBool) {SaveData(); }
  // Iterate backwards so splice() doesn't skip the next particle.
  for (let i = particles.length - 1; i >= 0; i--) {
    particles[i].update();
    particles[i].show();
    if (particles[i].finished()) {
      particles.splice(i, 1);
    }
  }
}
// 1) Data Collection =====================================================================================
// In order to collect data, the user has to whistle constantly
// Adds one training sample: the pinch midpoint as input, the frequency of
// the currently selected note label as the regression target.
function CollectData(inputs) {
  const target = { frequency: notes[label] };
  brain.addData(inputs, target);
  console.log("Collecting...");
}
// Keyboard fallbacks: a–g select the note label; p toggles collection,
// o triggers training and i triggers saving (backups for the sound commands).
function keyPressed() {
  // Lowercase note key maps directly to its uppercase label.
  if (['a', 'b', 'c', 'd', 'e', 'f', 'g'].includes(key)) {
    label = key.toUpperCase();
  }
  //Backup buttons
  if (key === 'p') {
    collectDataBool = !collectDataBool;
    if (!collectDataBool) {
      console.log("Stopped Collecting.");
    }
  }
  if (key === 'o') {
    trainDataBool = true;
  }
  if (key === 'i') {
    saveDataBool = true;
  }
}
// 2) Training Data ========================================================================================
// This function is called when the user snaps their fingers.
// Normalizes the collected samples and trains the network for 20 epochs;
// finishedTraining() flips the sketch into prediction mode when done.
function TrainData() {
  brain.normalizeData();
  brain.train({ epochs: 20 }, finishedTraining);
  console.log("Training Data...");
  trainDataBool = false; // consume the one-shot request flag
}
// If the user makes a pop sound, it activates this statement.
// Exports the collected coordinate/pitch dataset (triggered by a 'pop' sound
// or the 'i' key).
function SaveData() {
  brain.saveData('Coordinates_Pitches_Dataset');
  console.log("Saving Data...");
  saveDataBool = false; // consume the one-shot request flag
}
// Start classification when model is finished training
// Training-complete callback: switch the sketch into prediction mode.
function finishedTraining() {
  console.log('Finished Training');
  state = "prediction";
}
// 3) Deploying Data ========================================================================================
// Just log the results of model
// Prediction callback: stores the predicted frequency for pinchSounds().
function gotResults(error, results) {
  if (error) {
    // Log instead of silently swallowing the prediction error.
    console.error(error);
    return;
  }
  if (results) {
    // NOTE(review): depending on the ml5 version, the predicted value may be
    // exposed as results[0].value rather than results[0].frequency — confirm.
    TrainedModelFrequency = results[0].frequency;
  }
}
// Particle Class ========================================================================================
// A short-lived yellow burst drawn at a pinch midpoint; it fades (alpha)
// and shrinks (diameter) every frame until finished() reports true.
class Particle {
  constructor(x, y) {
    this.x = x;
    this.y = y;
    this.alpha = 255; // opacity, decays 10/frame
    this.d = 30; // diameter, shrinks 5/frame
  }
  // Advance the fade/shrink animation by one frame.
  update() {
    this.alpha -= 10;
    this.d -= 5;
  }
  // Fully faded out and ready for removal?
  finished() {
    return this.alpha < 0;
  }
  // Render as a flickering yellow ellipse at the current opacity.
  show() {
    noStroke();
    fill(random(225, 255), random(225, 255), 0, this.alpha);
    ellipse(this.x, this.y, this.d);
  }
}