<aside> 💡 Description: An example of speech recognition that lets the user control a 3D object's animations with voice commands. Optimisation included. Based on the Web Speech API.
</aside>
video-output-8270EA86-5491-4F41-AADA-D2132F2B8969.MOV
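At its core the pattern is simple: listen with the Web Speech API, scan each transcript for a trigger word, and play the matching animation. Here is a minimal sketch of that loop in isolation (the `playAnimation` helper is a placeholder, not a Geenee SDK call; the full version follows below):

const recognition = new webkitSpeechRecognition();
recognition.continuous = true;
recognition.interimResults = true;
recognition.onresult = (event) => {
  // take the newest result and check it for a trigger word
  const transcript = event.results[event.results.length - 1][0].transcript.toLowerCase();
  if (transcript.includes('sit')) playAnimation('sitting'); // playAnimation is hypothetical
};
recognition.start();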
Add the model to the Scene:
Insert the custom code below by clicking on the {} icon:
// ======= Animation Engine ================
// make sure the Mesh/Object you use has a populated animations[] array
const animatedObject = this.object3D.children[0];
const clock = new this.THREE.Clock();
const mixer = new this.THREE.AnimationMixer(animatedObject);
const clips = animatedObject.animations;
let animated = false;
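// 'animated' is true while a trick clip plays; voice commands are ignored until it finishes (see onresult)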
// ======= Animation Engine ================
// The animation engine will be available via the SDK soon.
// TODO: rewrite playAnim / stopAnim / stopAllAnims / animAction / crossFade as SDK commands
let activeAnim = 'standing';
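// play the named clip exclusively: stop all other actions first so clips don't overlap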
const playAnim = (name) => {
stopAllAnims();
clips.forEach( ( clip ) => {
if (clip.name === name) mixer.clipAction( clip ).play();
} );
activeAnim = name;
}
const stopAnim = (name) => {
clips.forEach( ( clip ) => {
if (clip.name === name) mixer.clipAction( clip ).stop();
} );
}
const stopAllAnims = () => {
clips.forEach( ( clip ) => {
mixer.clipAction( clip ).stop();
} );
}
const animAction = (name) => {
const clip = this.THREE.AnimationClip.findByName( clips, name );
return mixer.clipAction( clip );
}
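// blend from the current action into 'toAnim' over 'time' seconds, stopping the old action once the fade completes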
const crossFade = (fromAction, toAnim, time) => {
if (activeAnim === toAnim) return;
animated = toAnim !== 'standing' && toAnim !== 'sitting';
animAction(fromAction).play();
animAction(fromAction).crossFadeTo( animAction(toAnim), time );
animAction(toAnim).play();
setTimeout(() => animAction(fromAction).stop(), time * 1000);
activeAnim = toAnim;
}
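// once a trick animation finishes a loop cycle, fade back to the idle 'standing' pose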
const setupAnimBehavior = () => {
mixer.addEventListener( 'loop', ( e ) => {
crossFade(activeAnim, 'standing', 0.15);
animated = false;
} );
animAction(activeAnim).play(); // activeAnim defaults to 'standing' ('sitting' also works as the idle pose)
}
// ======= Animation Engine ================
// =========== render loop ===========
const render = () => {
const delta = clock.getDelta();
mixer.update( delta );
};
// Reassign to Geenee Render Loop
this.activeSceneModel.userCallbacks.onRender = render;
// =========== render loop ===========
// Lists of voice-command options; the desired trigger word must come first in each array
const sitC = ['sit', 'set', 'seat', 'sick', 'see', 'down', 'city']; // run animation "sitting"
const upC = ['up', 'app', 'opt', 'standup']; // run animation "standing"
const shakeC = ['shake', 'snake', 'sake', 'shape']; // run animation "shake"
const rolloverC = ['rollover', 'roll', 'over', 'roller']; // run animation "rollover"
const playdeadC = ['playdead', 'play', 'dead', 'dad', 'playgnat', 'add']; // run animation "play_dead"
const commandsList = [sitC, upC, shakeC, rolloverC, playdeadC];
// map each trigger word to the corresponding animation
const commands = {
'shake': () => {
crossFade(activeAnim, 'shake', 1.0);
},
'sit': () => {
crossFade(activeAnim, 'sitting', 1.0);
},
'up': () => {
crossFade(activeAnim, 'standing', 1.0);
},
'rollover': () => {
crossFade(activeAnim, 'rollover', 1.0);
},
'playdead': () => {
crossFade(activeAnim, 'play_dead', 1.0);
}
};
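// note: each key above must equal the first word of its command list; runCommand relies on that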
const runCommand = (commandName) => {
// split the recognized phrase on spaces to find at least one known trigger word
const inputCommandsArr = commandName.toLowerCase().split(' ');
for (const command of inputCommandsArr) {
// check whether the word exists in any of the command lists
for (const list of commandsList) {
if (list.includes(command)) {
// if so, run the command keyed by the first word of the matching list
commands[list[0]]();
return; // stop after the first match
}
}
}
}
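// e.g. runCommand('good dog sit down') matches 'sit' and crossfades into the 'sitting' clip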
// ===== Speech Recognition Logic =========
if ("webkitSpeechRecognition" in window) {
let speechRecognition = new webkitSpeechRecognition();
let final_transcript = "";
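// keep listening across pauses and surface interim results so commands fire with low latency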
speechRecognition.continuous = true;
speechRecognition.interimResults = true;
// set the recognition language
speechRecognition.lang = 'en-US';
// speechRecognition.onstart = () => {}; // on start callback
speechRecognition.onerror = () => {
alert("Microphone access is disabled. Go to Settings -> Safari -> Microphone -> Allow, then reload the page");
}; // on error callback
// the recognition service stops automatically after silence; restart it to keep listening
speechRecognition.onend = () => {
speechRecognition.start();
};
speechRecognition.onresult = (event) => {
let interim_transcript = "";
if (animated) return;
for (let i = event.resultIndex; i < event.results.length; ++i) {
if (event.results[i].isFinal) {
// final result
final_transcript = event.results[i][0].transcript;
runCommand(final_transcript);
} else {
// interim (partial) result
interim_transcript = event.results[i][0].transcript;
runCommand(interim_transcript);
}
}
};
this.activeSceneModel.$parent.emitter.addListener('geenee-model-placed', () => {
// start recognition and the idle animation once the model has been placed
speechRecognition.start();
setupAnimBehavior();
});
} else {
alert("Speech Recognition Not Available. Go to Settings -> Safari/Chrome -> Microphone -> Allow");
}
// ===== Speech Recognition Logic ========
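Note: the code above relies on the prefixed webkitSpeechRecognition constructor (Chrome, Safari). As a sketch, a slightly more defensive feature check could also pick up the unprefixed standard name where browsers ship it:

const SpeechRecognitionImpl = window.SpeechRecognition || window.webkitSpeechRecognition;
if (SpeechRecognitionImpl) {
  const speechRecognition = new SpeechRecognitionImpl();
  // ...same setup as above
} else {
  alert("Speech Recognition is not available in this browser.");
}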