|
|
@@ -1,23 +1,26 @@ |
|
|
|
// web speech recognition api |
|
|
|
// #region web speech recognition api |
|
|
|
var SpeechRecognition = SpeechRecognition || webkitSpeechRecognition; |
|
|
|
var SpeechGrammarList = SpeechGrammarList || webkitSpeechGrammarList; |
|
|
|
var SpeechRecognitionEvent = SpeechRecognitionEvent || webkitSpeechRecognitionEvent; |
|
|
|
// #endregion |
|
|
|
|
|
|
|
// state management |
|
|
|
// #region state management |
|
|
|
var state = ''; |
|
|
|
var question = 0; |
|
|
|
var rePrompt = false; |
|
|
|
var partTwo = false; |
|
|
|
var count = 0; |
|
|
|
var questionThreeCount = 0; |
|
|
|
var strike = 0; |
|
|
|
// #endregion |
|
|
|
|
|
|
|
// Questions |
|
|
|
// #region questions |
|
|
|
const QUESTION_ONE = 'Ich werde Ihnen jetzt langsam eine Liste mit Worten vorlesen. Danach wiederholen Sie bitte möglichst viele dieser Worte. Auf die Reihenfolge kommt es nicht an.'; |
|
|
|
const QUESTION_ONE_PT2 = 'Vielen Dank. Nun nenne ich Ihnen die gleichen 10 Worte ein zweites mal. Auch danach sollen Sie wieder möglichst viele Worte wiederholen'; |
|
|
|
const QUESTION_TWO = 'Nennen Sie mir bitte so viel Dinge wie möglich, die man im Supermarkt kaufen kann. Sie haben dafür eine Minute Zeit.'; |
|
|
|
const QUESTION_TWO = 'Nennen Sie mir bitte so viel Dinge wie möglich, die man im Supermarkt kaufen kann. Sie haben dafür eine Minute Zeit. Und Los'; |
|
|
|
const QUESTION_THREE = 'Ich werde Ihnen jetzt eine Zahlenreihe nennen, die Sie mir dann bitte in umgekehrter Reihenfolge wiederholen sollen. Wenn ich beispielsweise, vier - fünf sage, dann sagen Sie bitte, fünf - vier.'; |
|
|
|
// #endregion |
|
|
|
|
|
|
|
// intents |
|
|
|
// #region intents |
|
|
|
const WELCOME_INTENT = 'Default Welcome Intent'; |
|
|
|
const WELCOME_FOLLOWUP_YES = 'Default Welcome Intent - yes'; |
|
|
|
const WELCOME_FOLLOWUP_NO = 'Default Welcome Intent - no'; |
|
|
@@ -29,24 +32,48 @@ const FALLBACK_INTENT = 'Default Fallback Intent'; |
|
|
|
const HELP_INTENT = 'Help Intent'; |
|
|
|
const CHOOSE_QUESTION = 'Frage_Starten'; |
|
|
|
const NEXT_QUESTION = 'Nächste Frage'; |
|
|
|
// #endregion |
|
|
|
|
|
|
|
// question one results |
|
|
|
const QUESTION_ONE_ANSWERS = ['teller', 'hund', 'lampe', 'brief', 'apfel', 'apfelwiese', 'apfelwiese', 'apfelbaum', 'apfelbaum', 'und', 'hose', 'tisch', 'wiese', 'glas', 'baum']; |
|
|
|
// #region questions and expected results |
|
|
|
const QUESTION_ONE_ANSWERS = { 'teller': 1, 'hund': 1, 'lampe': 1, 'brief': 1, 'apfel': 1, 'apfelwiese': 2, 'apfelbaum': 2, 'und': 1, 'hose': 1, 'tisch': 1, 'wiese': 1, 'glas': 1, 'baum': 1 }; |
|
|
|
const QUESTION_ONE_QUESTIONS = ['teller', 'hund', 'lampe', 'brief', 'apfel', 'hose', 'tisch', 'wiese', 'glas', 'baum']; |
|
|
|
const QUESTION_TWO_ANSWERS = ['']; |
|
|
|
const QUESTION_TWO_QUESTIONS = ['']; |
|
|
|
const QUESTION_TWO_ANSWERS = {}; |
|
|
|
var QUESTION_TWO_QUESTIONS = ['']; |
|
|
|
const QUESTION_THREE_QUESTIONS_PT1 = ['7, 2', '4, 7, 9', '5, 4, 9, 6', '2, 7, 5, 3, 6', '8, 1, 3, 5, 4, 2']; |
|
|
|
const QUESTION_THREE_QUESTIONS_PT2 = ['8, 6', '3, 1, 5', '1, 9, 7, 4', '1, 3, 5, 4, 8', '4, 1, 2, 7, 9, 5']; |
|
|
|
const QUESTION_THREE_ANSWERS_PT1 = ['27', '974', '6945', '63572', '245318']; |
|
|
|
const QUESTION_THREE_ANSWERS_PT2 = ['68', '513', '4791', '84531', '597214']; |
|
|
|
LoadQuestionTwo();

// Fetch the grocery-word list ('lebensmittel.txt') and populate the
// question-two lookup tables (QUESTION_TWO_QUESTIONS / QUESTION_TWO_ANSWERS).
function LoadQuestionTwo () {
    var request;
    if (window.XMLHttpRequest) { // IE7+, Firefox, Chrome, Opera, Safari
        request = new XMLHttpRequest();
    } else { // legacy fallback for IE6 / IE5
        request = new ActiveXObject('Microsoft.XMLHTTP');
    }
    request.onreadystatechange = function () {
        if (request.readyState === 4 && request.status === 200) {
            var body = request.responseText.toLowerCase();
            // the file stores one word per CRLF-separated line
            QUESTION_TWO_QUESTIONS = body.split('\r\n');
            for (let entry of QUESTION_TWO_QUESTIONS) {
                QUESTION_TWO_ANSWERS[entry] = 1; // every listed word is worth one point
            }
        }
    };
    request.open('GET', 'lebensmittel.txt', true);
    request.send();
}
|
|
|
// #endregion |
|
|
|
|
|
|
|
// points |
|
|
|
// #region points |
|
|
|
var questionOnePoints = 0; |
|
|
|
var questionTwoPoints = 0; |
|
|
|
var questionThreePoints = 0; |
|
|
|
var questionFourPoints = 0; |
|
|
|
var questionFivePoints = 0; |
|
|
|
var questionSixPoints = 0; |
|
|
|
// #endregion |
|
|
|
|
|
|
|
// tokenization |
|
|
|
const separators = [' ', '\\\+', '-', '\\\(', '\\\)', '\\*', '/', ':', '\\\?']; |
|
|
@@ -54,20 +81,19 @@ const separators = [' ', '\\\+', '-', '\\\(', '\\\)', '\\*', '/', ':', '\\\?']; |
|
|
|
// Timers |
|
|
|
var timerId; |
|
|
|
|
|
|
|
// html elements |
|
|
|
// #region html elements |
|
|
|
var serverPara = document.querySelector('.server'); |
|
|
|
var diagnosticPara = document.querySelector('.output'); |
|
|
|
var testBtn = document.querySelector('button'); |
|
|
|
var testBtn2 = document.getElementById('speechBtn'); |
|
|
|
var infoPara = document.getElementById('info'); |
|
|
|
var userPrompt = document.getElementById('query'); |
|
|
|
|
|
|
|
console.log(window.location.host + window.location.pathname); |
|
|
|
// #endregion |
|
|
|
|
|
|
|
// websocket to communicate with the server |
|
|
|
var ws = new WebSocket('ws://localhost:8000/ws'); |
|
|
|
var ws = new WebSocket('ws://' + window.location.host + window.location.pathname + 'ws'); |
|
|
|
|
|
|
|
// speech recognition |
|
|
|
// #region speech recognition initialization |
|
|
|
var recognition = new SpeechRecognition(); |
|
|
|
recognition.lang = 'de-DE'; |
|
|
|
// recognition.interimResults = false; |
|
|
@@ -75,12 +101,15 @@ recognition.maxAlternatives = 1; |
|
|
|
recognition.continuous = true; |
|
|
|
var answerQuery = ''; |
|
|
|
var skipRecording = false; |
|
|
|
// #endregion |
|
|
|
|
|
|
|
// speech synthesis |
|
|
|
// #region speech synthesis initialization |
|
|
|
var speechsynth = new SpeechSynthesisUtterance(); |
|
|
|
var listSpeechsynth = new SpeechSynthesisUtterance(); |
|
|
|
var voices; |
|
|
|
// #endregion |
|
|
|
|
|
|
|
// #region speech events |
|
|
|
window.speechSynthesis.onvoiceschanged = function () { |
|
|
|
voices = window.speechSynthesis.getVoices(); |
|
|
|
voices.forEach(element => { |
|
|
@@ -93,46 +122,29 @@ window.speechSynthesis.onvoiceschanged = function () { |
|
|
|
listSpeechsynth.rate = 0.7; |
|
|
|
}; |
|
|
|
|
|
|
|
// Kick off the screening: notify the server over the websocket and lock the
// UI button so the test cannot be started twice.
function startDemenzScreening () {
    console.log('button clicked');
    ws.send('starte demenz test');
    testBtn.disabled = true;
    testBtn.textContent = 'Test in progress';
    infoPara.textContent = 'wait...';
}
|
|
|
|
|
|
|
// Queue `sentence` on the shared utterance object and hand it to the
// browser's speech synthesizer.
function speak (sentence) {
    speechsynth.text = sentence;
    window.speechSynthesis.speak(speechsynth);
}
|
|
|
|
|
|
|
// Manual smoke test for the speech-synthesis setup: speaks a fixed phrase at
// full volume and logs the synthesizer state. (Dead commented-out code from
// earlier experiments was removed.)
function testSpeechOut () {
    console.log('click');
    speechsynth.text = 'test 123';
    speechsynth.volume = 1;
    speechsynth.rate = 1;
    console.log(speechsynth);
    window.speechSynthesis.speak(speechsynth);
    console.log(window.speechSynthesis);
}
|
|
|
// Runs after every utterance spoken through the shared `speechsynth`:
// start listening for the user's answer unless the caller asked to skip
// this round (skipRecording is a one-shot flag and is always reset).
// Removed: a switch over `question` whose cases were all empty breaks.
speechsynth.onend = function (event) {
    if (!skipRecording) {
        recognizeSpeech();
    }
    skipRecording = false;
    // BUG FIX: the original did `diagnosticPara = ''`, replacing the DOM
    // element reference with a string and breaking every later
    // `diagnosticPara.textContent` access. Clear the element's text instead.
    diagnosticPara.textContent = '';
    console.log('global speech end');
};
|
|
|
// #endregion |
|
|
|
|
|
|
|
// websocket events |
|
|
|
// #region websocket events |
|
|
|
ws.onopen = function () { |
|
|
|
serverPara.style.background = 'green'; |
|
|
|
serverPara.innerHTML = 'Server online'; |
|
|
@@ -143,22 +155,26 @@ ws.onmessage = function (payload) { |
|
|
|
checkIntent(dialogflowResult); |
|
|
|
document.querySelector('h1').innerHTML = dialogflowResult.intent.displayName; |
|
|
|
}; |
|
|
|
// #endregion |
|
|
|
|
|
|
|
// INTENT HANDLING |
|
|
|
function checkIntent (result) { |
|
|
|
switch (result.intent.displayName) { |
|
|
|
case QUIT_INTENT: |
|
|
|
state = 'quit'; |
|
|
|
if (timerId !== undefined) { |
|
|
|
clearTimeout(timerId); |
|
|
|
} |
|
|
|
skipRecording = true; |
|
|
|
speak('Okay, Danke fürs Benutzen.'); |
|
|
|
speak('Beende die Durchführung.'); |
|
|
|
break; |
|
|
|
case WELCOME_INTENT: |
|
|
|
state = 'detect'; |
|
|
|
// speak(result.fulfillmentText) |
|
|
|
speak('go?'); |
|
|
|
speak('go'); |
|
|
|
break; |
|
|
|
case WELCOME_FOLLOWUP_YES: |
|
|
|
startQuestion(1); |
|
|
|
startQuestion(2); |
|
|
|
break; |
|
|
|
case WELCOME_FOLLOWUP_NO: |
|
|
|
skipRecording = true; |
|
|
@@ -195,25 +211,25 @@ function checkIntent (result) { |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
// #region question handling |
|
|
|
// Enter question `number`: remember which question is active, switch the
// global state machine to answer mode, and dispatch to the question reader.
function startQuestion (number) {
    question = number;
    state = 'answer';
    handleQuestion();
}
|
|
|
|
|
|
|
// QUESTION HANDLING |
|
|
|
function handleQuestion () { |
|
|
|
switch (question) { |
|
|
|
case 1: |
|
|
|
readQuestionOne(QUESTION_ONE); |
|
|
|
skipRecording = true; |
|
|
|
speak(QUESTION_ONE); |
|
|
|
readQuestionOne(); |
|
|
|
break; |
|
|
|
case 2: |
|
|
|
readQuestionTwo(QUESTION_TWO); |
|
|
|
readQuestionTwo(); |
|
|
|
break; |
|
|
|
case 3: |
|
|
|
// skipRecording = true; |
|
|
|
// speak(QUESTION_THREE); |
|
|
|
speak(QUESTION_THREE_QUESTIONS_PT1[0]); |
|
|
|
readQuestionThree(); |
|
|
|
break; |
|
|
|
case 4: |
|
|
|
break; |
|
|
@@ -222,6 +238,52 @@ function handleQuestion () { |
|
|
|
} |
|
|
|
} |
|
|
|
|
|
|
|
// Read the ten memory words aloud, one utterance each, at a slowed rate.
// When the final word finishes, start listening for the user's answer.
function readQuestionOne () {
    // BUG FIX: the recognition hook was attached on the hard-coded index
    // `i === 9`; derive the last index from the list length so the hook
    // survives any change to the word list.
    const lastIndex = QUESTION_ONE_QUESTIONS.length - 1;
    for (let i = 0; i < QUESTION_ONE_QUESTIONS.length; i++) {
        let utterance = new SpeechSynthesisUtterance();
        utterance.voice = voices[2]; // NOTE(review): assumes voice index 2 is the desired (German) voice — confirm across browsers
        utterance.rate = 0.75; // slowed so the words are easy to memorize
        utterance.text = QUESTION_ONE_QUESTIONS[i];
        window.speechSynthesis.speak(utterance);
        if (i === lastIndex) {
            utterance.onend = function (event) {
                recognizeSpeech();
            };
        }
    }
}
|
|
|
|
|
|
|
// Speak the supermarket question, then record answers for one minute before
// stopping recognition and scoring whatever was heard.
function readQuestionTwo () {
    let utterance = new SpeechSynthesisUtterance();
    utterance.voice = voices[2]; // NOTE(review): assumes voice index 2 is the German voice — confirm
    utterance.text = QUESTION_TWO;
    window.speechSynthesis.speak(utterance);
    utterance.onend = function (event) {
        // BUG FIX: the timeout was 6000 ms, but QUESTION_TWO promises the
        // user "eine Minute Zeit" — give the full 60 seconds to answer
        // (matching the earlier revision of this function).
        window.setTimeout(
            function () {
                recognition.stop();
                handleAnswer(answerQuery);
            }, 60000);
        recognizeSpeech();
    };
}
|
|
|
|
|
|
|
// Transition into the digit-span question: stop listening, thank the user,
// explain the task, then speak the first digit sequence.
function readQuestionThree () {
    // BUG FIX: the original did `recognition = false`, which destroyed the
    // SpeechRecognition instance and made every later `recognition.start()`
    // call throw. Stop the recognizer instead of clobbering the reference.
    recognition.stop();
    speak('Dankeschön. Weiter geht es mit der nächsten Frage. ');
    let utterance = new SpeechSynthesisUtterance();
    utterance.voice = voices[2]; // NOTE(review): assumes voice index 2 is the German voice — confirm
    utterance.text = QUESTION_THREE;
    window.speechSynthesis.speak(utterance);
    utterance.onend = function (event) {
        console.log('speach end');
        // speak the current digit sequence; questionThreeCount advances in
        // handleAnswerToThirdQuestion
        speak(QUESTION_THREE_QUESTIONS_PT1[questionThreeCount]);
    };
    utterance.onerror = function (event) {
        console.log('An error has occurred with the speech synthesis: ' + event.error);
    };
}
|
|
|
|
|
|
|
function handleAnswer (query) { |
|
|
|
switch (question) { |
|
|
|
case 1: |
|
|
@@ -242,14 +304,18 @@ function handleAnswer (query) { |
|
|
|
|
|
|
|
// Score the repeated words, then either advance to question two (after the
// second pass) or read the word list again for the second pass.
function handleAnswerToFirstQuestion (answer) {
    var tokens = answer.split(new RegExp(separators.join('|'), 'g'));
    // BUG FIX: the block both assigned (`=`) and then added (`+=`) the same
    // score, double-counting the first pass. Accumulate exactly once so the
    // two passes sum correctly across calls.
    questionOnePoints += calculatePoints(tokens, QUESTION_ONE_ANSWERS);
    if (partTwo) {
        // second pass finished — move on to question two
        partTwo = false;
        skipRecording = true;
        speak('Vielen Dank, nun geht es weiter mit der nächsten Frage');
        startQuestion(2);
    } else {
        // first pass finished — announce and run the word list a second time
        rePrompt = false;
        skipRecording = true;
        speak(QUESTION_ONE_PT2);
        readQuestionOne();
        partTwo = true;
    }
}
|
|
@@ -257,8 +323,6 @@ function handleAnswerToFirstQuestion (answer) { |
|
|
|
// Tokenize the spoken answer, score it against the grocery-word dictionary,
// announce the score, and advance to question three.
function handleAnswerToSecondQuestion (answer) {
    var tokens = answer.split(new RegExp(separators.join('|'), 'g'));
    questionTwoPoints = calculatePoints(tokens, QUESTION_TWO_ANSWERS);
    skipRecording = true;
    // BUG FIX: announced `questionOnePoints` (question one's score) and was
    // missing the space before "Punkte"; report this question's score.
    speak('Sie haben ' + questionTwoPoints + ' Punkte');
    startQuestion(3);
}
|
|
@@ -274,11 +338,11 @@ function handleAnswerToThirdQuestion (query) { |
|
|
|
} else { |
|
|
|
answerArray = QUESTION_THREE_ANSWERS_PT2; |
|
|
|
} |
|
|
|
if (query === answerArray[count]) { |
|
|
|
if (query === answerArray[questionThreeCount]) { |
|
|
|
strike = 0; |
|
|
|
partTwo = false; |
|
|
|
count++; |
|
|
|
questionThreePoints = count + 1; |
|
|
|
questionThreeCount++; |
|
|
|
questionThreePoints = questionThreeCount + 1; |
|
|
|
questionArray = QUESTION_THREE_QUESTIONS_PT1; |
|
|
|
} else { |
|
|
|
strike++; |
|
|
@@ -286,7 +350,7 @@ function handleAnswerToThirdQuestion (query) { |
|
|
|
questionArray = QUESTION_THREE_QUESTIONS_PT2; |
|
|
|
} |
|
|
|
|
|
|
|
if (strike === 2 || count === 5) { |
|
|
|
if (strike === 2 || questionThreeCount === 5) { |
|
|
|
speechsynth.rate = 1; |
|
|
|
skipRecording = true; |
|
|
|
speak('weiter geht es mit der Nächsten Frage'); |
|
|
@@ -294,104 +358,63 @@ function handleAnswerToThirdQuestion (query) { |
|
|
|
return; |
|
|
|
} |
|
|
|
|
|
|
|
speak(questionArray[count]); |
|
|
|
speak(questionArray[questionThreeCount]); |
|
|
|
|
|
|
|
console.log('count: ' + count + ', strike: ' + strike + ', points: ' + questionThreePoints); |
|
|
|
console.log('count: ' + questionThreeCount + ', strike: ' + strike + ', points: ' + questionThreePoints); |
|
|
|
} |
|
|
|
// #endregion |
|
|
|
|
|
|
|
// Question specific functions |
|
|
|
function readQuestionOne (text) { |
|
|
|
skipRecording = true; |
|
|
|
speak(text); |
|
|
|
for (let i = 0; i < QUESTION_ONE_QUESTIONS.length; i++) { |
|
|
|
let utterance = new SpeechSynthesisUtterance(); |
|
|
|
utterance.voice = voices[2]; |
|
|
|
utterance.rate = 0.75; |
|
|
|
utterance.text = QUESTION_ONE_QUESTIONS[i]; |
|
|
|
window.speechSynthesis.speak(utterance); |
|
|
|
if (i === 9) { |
|
|
|
utterance.onend = function (event) { |
|
|
|
console.log('end of aufzählung' + i); |
|
|
|
recognizeSpeech(); |
|
|
|
}; |
|
|
|
} |
|
|
|
} |
|
|
|
// #region global functions |
|
|
|
// Debug entry point: jumps straight to question three (the websocket
// handshake is bypassed) and locks the UI while the test runs.
function startDemenzScreening () {
    startQuestion(3);
    testBtn.disabled = true;
    testBtn.textContent = 'Test in progress';
    infoPara.textContent = 'wait...';
    diagnosticPara.textContent = 'detecting...';
}
|
|
|
|
|
|
|
function readQuestionTwo (text) { |
|
|
|
let utterance = new SpeechSynthesisUtterance(); |
|
|
|
utterance.voice = voices[2]; |
|
|
|
utterance.text = text; |
|
|
|
window.speechSynthesis.speak(utterance); |
|
|
|
console.log('q 2 started'); |
|
|
|
utterance.onend = function (event) { |
|
|
|
window.setTimeout( |
|
|
|
function () { |
|
|
|
recognition.stop(); |
|
|
|
handleAnswer(answerQuery); |
|
|
|
}, 60000); |
|
|
|
console.log('q 2 recognition started'); |
|
|
|
recognizeSpeech(); |
|
|
|
}; |
|
|
|
// Speak `sentence` through the shared utterance object; completion is
// observed via the global `speechsynth.onend` handler.
function speak (sentence) {
    speechsynth.text = sentence;
    window.speechSynthesis.speak(speechsynth);
}
|
|
|
|
|
|
|
function calculatePoints (tokens, array) { |
|
|
|
let points = 0; |
|
|
|
for (let i of array) { |
|
|
|
for (let j of tokens) { |
|
|
|
if (i === j) { |
|
|
|
points++; |
|
|
|
} |
|
|
|
} |
|
|
|
} |
|
|
|
return points; |
|
|
|
// Quick manual check that speech synthesis works: configure the shared
// utterance, speak a fixed phrase, and log the synthesizer state.
function testSpeechOut () {
    console.log('click');
    speechsynth.volume = 1;
    speechsynth.rate = 1;
    speechsynth.text = 'test 123';
    console.log(speechsynth);
    window.speechSynthesis.speak(speechsynth);
    console.log(window.speechSynthesis);
}
|
|
|
|
|
|
|
// After each utterance finishes, start listening for an answer unless the
// caller requested to skip this round (skipRecording is a one-shot flag).
// Removed: a switch over `question` in which every case was an empty
// `break` — pure dead code.
speechsynth.onend = function (event) {
    if (!skipRecording) {
        recognizeSpeech();
    }
    skipRecording = false;
    console.log('global speech end');
};
|
|
|
|
|
|
|
function recognizeSpeech () { |
|
|
|
if (state === 'answer') { |
|
|
|
var arr; |
|
|
|
switch (question) { |
|
|
|
case 1: |
|
|
|
arr = QUESTION_ONE_QUESTIONS; |
|
|
|
break; |
|
|
|
case 2: |
|
|
|
return; |
|
|
|
case 3: |
|
|
|
arr = [1, 2, 3, 4, 5, 6, 7, 8, 9]; |
|
|
|
break; |
|
|
|
case 4: |
|
|
|
break; |
|
|
|
case 5: |
|
|
|
break; |
|
|
|
} |
|
|
|
|
|
|
|
var grammar = '#JSGF V1.0; grammar colors; public <color> = ' + arr.join(' | ') + ' ;'; |
|
|
|
var speechRecognitionList = new SpeechGrammarList(); |
|
|
|
speechRecognitionList.addFromString(grammar, 1); |
|
|
|
recognition.grammars = speechRecognitionList; |
|
|
|
} |
|
|
|
// if (state === 'answer') { |
|
|
|
// var arr; |
|
|
|
// switch (question) { |
|
|
|
// case 1: |
|
|
|
// arr = QUESTION_ONE_QUESTIONS; |
|
|
|
// break; |
|
|
|
// case 2: |
|
|
|
// // arr = QUESTION_TWO_QUESTIONS; |
|
|
|
// break; |
|
|
|
// case 3: |
|
|
|
// arr = [1, 2, 3, 4, 5, 6, 7, 8, 9]; |
|
|
|
// break; |
|
|
|
// case 4: |
|
|
|
// break; |
|
|
|
// case 5: |
|
|
|
// break; |
|
|
|
// } |
|
|
|
|
|
|
|
// // var grammar = '#JSGF V1.0; grammar colors; public <color> = ' + arr.join(' | ') + ' ;'; |
|
|
|
// // var speechRecognitionList = new SpeechGrammarList(); |
|
|
|
// // speechRecognitionList.addFromString(grammar, 1); |
|
|
|
// // recognition.grammars = speechRecognitionList; |
|
|
|
// } |
|
|
|
recognition.start(); |
|
|
|
console.log('reocgnition started'); |
|
|
|
console.log('reocgnition started. Question: ' + question); |
|
|
|
|
|
|
|
recognition.onresult = function (event) { |
|
|
|
var last = event.results.length - 1; |
|
|
@@ -400,7 +423,7 @@ function recognizeSpeech () { |
|
|
|
diagnosticPara.textContent += speechResult + ' '; |
|
|
|
|
|
|
|
// console.log('Confidence: ' + event.results[0][0].confidence) |
|
|
|
|
|
|
|
console.log('process: ' + speechResult); |
|
|
|
processSpeech(speechResult); |
|
|
|
// testBtn.disabled = false |
|
|
|
// testBtn.textContent = 'record...' |
|
|
@@ -427,6 +450,7 @@ function recognizeSpeech () { |
|
|
|
case 4: |
|
|
|
break; |
|
|
|
case 5: |
|
|
|
timeOut = 6500; |
|
|
|
break; |
|
|
|
} |
|
|
|
|
|
|
@@ -505,5 +529,16 @@ function recognizeSpeech () { |
|
|
|
}; |
|
|
|
} |
|
|
|
|
|
|
|
/**
 * Sum the point values of every token that appears in the answer dictionary.
 *
 * @param {string[]} tokens - Words extracted from the user's spoken answer.
 * @param {Object.<string, number>} dict - Map of accepted word -> point value.
 * @returns {number} Total points; a token scores each time it occurs.
 */
function calculatePoints (tokens, dict) {
    let points = 0;
    for (let word of tokens) {
        // BUG FIX: `dict[word] !== undefined` also matched keys inherited
        // from Object.prototype (e.g. 'toString'), adding a function to the
        // numeric score and yielding NaN. Count only the dict's own entries.
        if (Object.prototype.hasOwnProperty.call(dict, word)) {
            points += dict[word];
        }
    }
    return points;
}
|
|
|
// #endregion |
|
|
|
|
|
|
|
// Wire the UI: the main button starts the screening; the secondary speech
// button runs a text-to-speech smoke test.
testBtn.addEventListener('click', startDemenzScreening);
testBtn2.addEventListener('click', testSpeechOut);