Fixed a problem where recognition started too early
plus readability improvements
parent 11c87ece10
commit cbddf1d1c7
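At the heart of the change, as the diff below shows, is a skipRecording flag that keeps speech recognition from being started while a prompt is still being spoken. A minimal sketch of that pattern, reusing names from the diff and assuming the usual browser Web Speech API setup, looks like this:

// Sketch only: assumes a browser exposing the Web Speech API.
const recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
let skipRecording = false; // set to true before prompts that expect no spoken answer

const utterance = new SpeechSynthesisUtterance('Frage eins');
utterance.onend = function () {
  // Start listening only after the prompt has finished and an answer is expected.
  if (!skipRecording) {
    recognition.start();
    console.log('recognition started');
  }
  skipRecording = false;
};
window.speechSynthesis.speak(utterance);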
@@ -67,13 +67,13 @@ function LoadQuestionTwo () {
// #endregion

// #region points
var questionOnePoints = 0;
var questionTwoPoints = 0;
var questionThreePoints = 0;
var questionFourPoints = 0;
var questionFivePoints = 0;
var questionSixPoints = 0;
// #endregion
const questionPoints = {
  1: 0,
  2: 0,
  3: 0,
  4: 0,
  5: 0 };
// #endregion

// tokenization
const separators = [' ', '\\\+', '-', '\\\(', '\\\)', '\\*', '/', ':', '\\\?'];
@@ -136,10 +136,11 @@ speechsynth.onend = function (event) {
      break;
  }
  if (!skipRecording) {
    recognizeSpeech();
    recognition.start();
    console.log('recognition started. Question: ' + question);
  }
  skipRecording = false;
  diagnosticPara = '';
  diagnosticPara.textContent = '';
  console.log('global speech end');
};
// #endregion
@@ -246,7 +247,8 @@ function readQuestionOne () {
    window.speechSynthesis.speak(utterance);
    if (i === 9) {
      utterance.onend = function (event) {
        recognizeSpeech();
        recognition.start();
        console.log('recognition started. Question: ' + question);
      };
    }
  }
@@ -261,21 +263,22 @@ function readQuestionTwo () {
    window.setTimeout(
      function () {
        recognition.stop();
        console.log('recognition stopped');
        handleAnswer(answerQuery);
      }, 60000);
    recognizeSpeech();
      }, 6000);
    recognition.start();
    console.log('recognition started. Question: ' + question);
  };
}

function readQuestionThree () {
  recognition = false;
  speak('Dankeschön. Weiter geht es mit der nächsten Frage. ');
  skipRecording = true;
  speak('Dankeschön, weiter geht es mit der nächsten Frage.');
  let utterance = new SpeechSynthesisUtterance();
  utterance.voice = voices[2];
  utterance.text = QUESTION_THREE;
  window.speechSynthesis.speak(utterance);
  utterance.onend = function (event) {
    console.log('speech end');
    speak(QUESTION_THREE_QUESTIONS_PT1[questionThreeCount]);
  };
  utterance.onerror = function (event) {
@@ -303,9 +306,10 @@ function handleAnswer (query) {

function handleAnswerToFirstQuestion (answer) {
  var tokens = answer.split(new RegExp(separators.join('|'), 'g'));
  questionOnePoints += calculatePoints(tokens, QUESTION_ONE_ANSWERS);
  questionPoints[question] += calculatePoints(tokens, QUESTION_ONE_ANSWERS);
  if (partTwo) {
    partTwo = false;
    console.log('question 1 points: ' + questionPoints[question]);
    skipRecording = true;
    speak('Vielen Dank, nun geht es weiter mit der nächsten Frage');
    startQuestion(2);
@@ -321,7 +325,8 @@ function handleAnswerToFirstQuestion (answer) {

function handleAnswerToSecondQuestion (answer) {
  var tokens = answer.split(new RegExp(separators.join('|'), 'g'));
  questionTwoPoints = calculatePoints(tokens, QUESTION_TWO_ANSWERS);
  questionPoints[question] = calculatePoints(tokens, QUESTION_TWO_ANSWERS);
  console.log('question 2 points: ' + questionPoints[question]);
  startQuestion(3);
  // state = 'detect'
}
@@ -341,7 +346,7 @@ function handleAnswerToThirdQuestion (query) {
    strike = 0;
    partTwo = false;
    questionThreeCount++;
    questionThreePoints = questionThreeCount + 1;
    questionPoints[question] = questionThreeCount + 1;
    questionArray = QUESTION_THREE_QUESTIONS_PT1;
  } else {
    strike++;
@@ -351,6 +356,7 @@ function handleAnswerToThirdQuestion (query) {

  if (strike === 2 || questionThreeCount === 5) {
    speechsynth.rate = 1;
    console.log('question 3 points: ' + questionPoints[question]);
    skipRecording = true;
    speak('weiter geht es mit der Nächsten Frage');
    startQuestion(4);
@@ -359,13 +365,14 @@ function handleAnswerToThirdQuestion (query) {

  speak(questionArray[questionThreeCount]);

  console.log('count: ' + questionThreeCount + ', strike: ' + strike + ', points: ' + questionThreePoints);
  console.log('count: ' + questionThreeCount + ', strike: ' + strike + ', points: ' + questionPoints[question]);
}
// #endregion

// #region global functions
function startDemenzScreening () {
  ws.send('starte demenz test');
  // startQuestion(2);
  testBtn.disabled = true;
  testBtn.textContent = 'Test in progress';
  infoPara.textContent = 'wait...';
@@ -378,160 +385,171 @@ function speak (sentence) {
}

function testSpeechOut () {
  console.log('click');
  speechsynth.text = 'test 123';
  speechsynth.volume = 1;
  speechsynth.rate = 1;
  console.log(speechsynth);
  window.speechSynthesis.speak(speechsynth);
  console.log(window.speechSynthesis);
}

function recognizeSpeech () {
  // if (state === 'answer') {
  // var arr;
  // switch (question) {
  // case 1:
  // arr = QUESTION_ONE_QUESTIONS;
  // break;
  // case 2:
  // // arr = QUESTION_TWO_QUESTIONS;
  // break;
  // case 3:
  // arr = [1, 2, 3, 4, 5, 6, 7, 8, 9];
  // break;
  // case 4:
  // break;
  // case 5:
  // break;
  // }

  // // var grammar = '#JSGF V1.0; grammar colors; public <color> = ' + arr.join(' | ') + ' ;';
  // // var speechRecognitionList = new SpeechGrammarList();
  // // speechRecognitionList.addFromString(grammar, 1);
  // // recognition.grammars = speechRecognitionList;
  // }
  recognition.start();
  console.log('recognition started. Question: ' + question);

  recognition.onresult = function (event) {
    var last = event.results.length - 1;
    var speechResult = event.results[last][0].transcript.toLowerCase();

    diagnosticPara.textContent += speechResult + ' ';

    // console.log('Confidence: ' + event.results[0][0].confidence)
    console.log('process: ' + speechResult);
    processSpeech(speechResult);
    // testBtn.disabled = false
    // testBtn.textContent = 'record...'
  };

  function processSpeech (speechResult) {
    console.log('To dialogflow: ' + speechResult);
    ws.send(speechResult);

    let timeOut;
    switch (question) {
      case 1:
        timeOut = 6500;
        break;
      case 2:
        answerQuery += speechResult;
        return;
      case 3:
        if (speechResult.includes('uhr')) {
          speechResult = speechResult.replace('uhr', '');
        }
        timeOut = 6500;
        break;
      case 4:
        break;
      case 5:
        timeOut = 6500;
        break;
    }

    if (state === 'answer') {
      if (timerId != undefined) {
        clearTimeout(timerId);
      }
      answerQuery += speechResult;
      timerId = window.setTimeout(
        function () {
          // if (!rePrompt) {
          //   ws.send('ich brauche noch etwas Zeit')
          // } else {
          console.log('recording end. Evaluate: ' + answerQuery);
          handleAnswer(answerQuery);
          answerQuery = '';
          diagnosticPara.textContent = '';
          // }
          recognition.stop();
          console.log('timer fallback');
        }, timeOut);
    } else {
      console.log('recording end.');
      recognition.stop();
    }
  answerQuery = 'apfel wiese tisch apfel lampe pferd';
  question = 1;
  for (let i = 0; i < 2; i++) {
    var tokens = answerQuery.split(new RegExp(separators.join('|'), 'g'));
    questionPoints[question] += calculatePoints(tokens, QUESTION_ONE_ANSWERS);
  }

  recognition.onspeechend = function () {
    // recognition.stop();
    // testBtn.disabled = false;
    // testBtn.textContent = 'Start new test';
  };
  console.log(questionPoints[question]);

  recognition.onerror = function (event) {
    testBtn.disabled = false;
    testBtn.textContent = 'Start new test';
    diagnosticPara.textContent = 'Error occurred in recognition: ' + event.error;
  };

  recognition.onaudiostart = function (event) {
    // Fired when the user agent has started to capture audio.

  };

  recognition.onaudioend = function (event) {

  };

  recognition.onend = function (event) {
    // Fired when the speech recognition service has disconnected.

  };

  recognition.onnomatch = function (event) {
    // Fired when the speech recognition service returns a final result with no significant recognition. This may involve some degree of recognition, which doesn't meet or exceed the confidence threshold.
    // console.log('SpeechRecognition.onnomatch')
  };

  recognition.onsoundstart = function (event) {
    // Fired when any sound — recognisable speech or not — has been detected.

  };

  recognition.onsoundend = function (event) {
    // Fired when any sound — recognisable speech or not — has stopped being detected.

  };

  recognition.onspeechstart = function (event) {
    // Fired when sound that is recognised by the speech recognition service as speech has been detected.

  };
  recognition.onstart = function (event) {
    // Fired when the speech recognition service has begun listening to incoming audio with intent to recognize grammars associated with the current SpeechRecognition.

  };
  // speechsynth.text = 'test 123';
  // speechsynth.volume = 1;
  // speechsynth.rate = 1;
  // console.log(speechsynth);
  // window.speechSynthesis.speak(speechsynth);
  // console.log(window.speechSynthesis);
}

function calculatePoints (tokens, dict) {
// function recognizeSpeech () {
// if (state === 'answer') {
// var arr;
// switch (question) {
// case 1:
// arr = QUESTION_ONE_QUESTIONS;
// break;
// case 2:
// // arr = QUESTION_TWO_QUESTIONS;
// break;
// case 3:
// arr = [1, 2, 3, 4, 5, 6, 7, 8, 9];
// break;
// case 4:
// break;
// case 5:
// break;
// }

// // var grammar = '#JSGF V1.0; grammar colors; public <color> = ' + arr.join(' | ') + ' ;';
// // var speechRecognitionList = new SpeechGrammarList();
// // speechRecognitionList.addFromString(grammar, 1);
// // recognition.grammars = speechRecognitionList;
// }

  recognition.onresult = function (event) {
    var last = event.results.length - 1;
    var speechResult = event.results[last][0].transcript.toLowerCase();

    diagnosticPara.textContent += speechResult + ' ';

    // console.log('Confidence: ' + event.results[0][0].confidence)
    console.log('process: ' + speechResult);
    processSpeech(speechResult);
    // testBtn.disabled = false
    // testBtn.textContent = 'record...'
  };

  function processSpeech (speechResult) {
    console.log('To dialogflow: ' + speechResult);
    ws.send(speechResult);

    let timeOut;
    switch (question) {
      case 1:
        timeOut = 6500;
        break;
      case 2:
        answerQuery += speechResult;
        return;
      case 3:
        if (speechResult.includes('uhr')) {
          speechResult = speechResult.replace('uhr', '');
        }
        timeOut = 6500;
        break;
      case 4:
        break;
      case 5:
        timeOut = 6500;
        break;
    }

    if (state === 'answer') {
      if (timerId != undefined) {
        clearTimeout(timerId);
      }
      answerQuery += speechResult;
      timerId = window.setTimeout(
        function () {
          // if (!rePrompt) {
          //   ws.send('ich brauche noch etwas Zeit')
          // } else {
          console.log('recording end. Evaluate: ' + answerQuery);
          handleAnswer(answerQuery);
          answerQuery = '';
          diagnosticPara.textContent = '';
          // }
          recognition.stop();
          console.log('timer fallback');
        }, timeOut);
    } else {
      console.log('recording end.');
      recognition.stop();
    }
  }

  // #region speech recognition event
  recognition.onspeechend = function () {
    // recognition.stop();
    // testBtn.disabled = false;
    // testBtn.textContent = 'Start new test';
  };

  recognition.onerror = function (event) {
    testBtn.disabled = false;
    testBtn.textContent = 'Start new test';
    diagnosticPara.textContent = 'Error occurred in recognition: ' + event.error;
  };

  recognition.onaudiostart = function (event) {
    // Fired when the user agent has started to capture audio.

  };

  recognition.onaudioend = function (event) {

  };

  recognition.onend = function (event) {
    // Fired when the speech recognition service has disconnected.

  };

  recognition.onnomatch = function (event) {
    // Fired when the speech recognition service returns a final result with no significant recognition. This may involve some degree of recognition, which doesn't meet or exceed the confidence threshold.
    // console.log('SpeechRecognition.onnomatch')
  };

  recognition.onsoundstart = function (event) {
    // Fired when any sound — recognisable speech or not — has been detected.

  };

  recognition.onsoundend = function (event) {
    // Fired when any sound — recognisable speech or not — has stopped being detected.

  };

  recognition.onspeechstart = function (event) {
    // Fired when sound that is recognised by the speech recognition service as speech has been detected.

  };
  recognition.onstart = function (event) {
    // Fired when the speech recognition service has begun listening to incoming audio with intent to recognize grammars associated with the current SpeechRecognition.

  };
// }
// #endregion

function calculatePoints (tokens, d) {
  let points = 0;
  let dict = {};
  Object.assign(dict, d);
  for (let word of tokens) {
    if (dict[word] !== undefined) {
      points += dict[word];
      delete dict[word];
    }
  }
  return points;