Fixed a problem where recognition started too early,
plus readability improvements.
parent 11c87ece10
commit cbddf1d1c7
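Note on the fix: recognition is now started directly from the speech-synthesis onend handlers via recognition.start(), instead of through the removed recognizeSpeech() wrapper, so listening can no longer begin before the spoken prompt has finished. A minimal standalone sketch of that pattern, assuming a browser that exposes the Web Speech API (the names speechsynth, recognition and skipRecording mirror the diff; the prompt text is illustrative):

    // Sketch only: start recognition when the spoken prompt ends, not before.
    const recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
    const speechsynth = new SpeechSynthesisUtterance('Wie heißen Sie?');
    let skipRecording = false;

    speechsynth.onend = function () {
        if (!skipRecording) {
            recognition.start();
            console.log('recognition started');
        }
        skipRecording = false;
    };

    window.speechSynthesis.speak(speechsynth);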
@@ -67,13 +67,13 @@ function LoadQuestionTwo () {
 // #endregion

 // #region points
-var questionOnePoints = 0;
-var questionTwoPoints = 0;
-var questionThreePoints = 0;
-var questionFourPoints = 0;
-var questionFivePoints = 0;
-var questionSixPoints = 0;
+const questionPoints = {
+1: 0,
+2: 0,
+3: 0,
+4: 0,
+5: 0 };
 // #endregion

 // tokenization
 const separators = [' ', '\\\+', '-', '\\\(', '\\\)', '\\*', '/', ':', '\\\?'];
@@ -136,10 +136,11 @@ speechsynth.onend = function (event) {
 break;
 }
 if (!skipRecording) {
-recognizeSpeech();
+recognition.start();
+console.log('reocgnition started. Question: ' + question);
 }
 skipRecording = false;
-diagnosticPara = '';
+diagnosticPara.textContent = '';
 console.log('global speech end');
 };
 // #endregion
@@ -246,7 +247,8 @@ function readQuestionOne () {
 window.speechSynthesis.speak(utterance);
 if (i === 9) {
 utterance.onend = function (event) {
-recognizeSpeech();
+recognition.start();
+console.log('reocgnition started. Question: ' + question);
 };
 }
 }
@@ -261,21 +263,22 @@ function readQuestionTwo () {
 window.setTimeout(
 function () {
 recognition.stop();
+console.log('recognition stopped');
 handleAnswer(answerQuery);
-}, 60000);
-recognizeSpeech();
+}, 6000);
+recognition.start();
+console.log('reocgnition started. Question: ' + question);
 };
 }

 function readQuestionThree () {
-recognition = false;
-speak('Dankeschön. Weiter geht es mit der nächsten Frage. ');
+skipRecording = true;
+speak('Dankeschön, weiter geht es mit der nächsten Frage.');
 let utterance = new SpeechSynthesisUtterance();
 utterance.voice = voices[2];
 utterance.text = QUESTION_THREE;
 window.speechSynthesis.speak(utterance);
 utterance.onend = function (event) {
-console.log('speach end');
 speak(QUESTION_THREE_QUESTIONS_PT1[questionThreeCount]);
 };
 utterance.onerror = function (event) {
@@ -303,9 +306,10 @@ function handleAnswer (query) {

 function handleAnswerToFirstQuestion (answer) {
 var tokens = answer.split(new RegExp(separators.join('|'), 'g'));
-questionOnePoints += calculatePoints(tokens, QUESTION_ONE_ANSWERS);
+questionPoints[question] += calculatePoints(tokens, QUESTION_ONE_ANSWERS);
 if (partTwo) {
 partTwo = false;
+console.log('question 1 points: ' + questionPoints[question]);
 skipRecording = true;
 speak('Vielen Dank, nun geht es weiter mit der nächsten Frage');
 startQuestion(2);
@@ -321,7 +325,8 @@ function handleAnswerToFirstQuestion (answer) {

 function handleAnswerToSecondQuestion (answer) {
 var tokens = answer.split(new RegExp(separators.join('|'), 'g'));
-questionTwoPoints = calculatePoints(tokens, QUESTION_TWO_ANSWERS);
+questionPoints[question] = calculatePoints(tokens, QUESTION_TWO_ANSWERS);
+console.log('question 2 points: ' + questionPoints[question]);
 startQuestion(3);
 // state = 'detect'
 }
@@ -341,7 +346,7 @@ function handleAnswerToThirdQuestion (query) {
 strike = 0;
 partTwo = false;
 questionThreeCount++;
-questionThreePoints = questionThreeCount + 1;
+questionPoints[question] = questionThreeCount + 1;
 questionArray = QUESTION_THREE_QUESTIONS_PT1;
 } else {
 strike++;
@@ -351,6 +356,7 @@ function handleAnswerToThirdQuestion (query) {

 if (strike === 2 || questionThreeCount === 5) {
 speechsynth.rate = 1;
+console.log('question 3 points: ' + questionPoints[question]);
 skipRecording = true;
 speak('weiter geht es mit der Nächsten Frage');
 startQuestion(4);
@@ -359,13 +365,14 @@ function handleAnswerToThirdQuestion (query) {

 speak(questionArray[questionThreeCount]);

-console.log('count: ' + questionThreeCount + ', strike: ' + strike + ', points: ' + questionThreePoints);
+console.log('count: ' + questionThreeCount + ', strike: ' + strike + ', points: ' + questionPoints[question]);
 }
 // #endregion

 // #region global functions
 function startDemenzScreening () {
 ws.send('starte demenz test');
+// startQuestion(2);
 testBtn.disabled = true;
 testBtn.textContent = 'Test in progress';
 infoPara.textContent = 'wait...';
@@ -378,160 +385,171 @@ function speak (sentence) {
 }

 function testSpeechOut () {
-console.log('click');
-speechsynth.text = 'test 123';
-speechsynth.volume = 1;
-speechsynth.rate = 1;
-console.log(speechsynth);
-window.speechSynthesis.speak(speechsynth);
-console.log(window.speechSynthesis);
-}
+answerQuery = 'apfel wiese tisch apfel lampe pferd';
+question = 1;
+for (let i = 0; i < 2; i++) {
+var tokens = answerQuery.split(new RegExp(separators.join('|'), 'g'));
+questionPoints[question] += calculatePoints(tokens, QUESTION_ONE_ANSWERS);

-function recognizeSpeech () {
-// if (state === 'answer') {
-// var arr;
-// switch (question) {
-// case 1:
-// arr = QUESTION_ONE_QUESTIONS;
-// break;
-// case 2:
-// // arr = QUESTION_TWO_QUESTIONS;
-// break;
-// case 3:
-// arr = [1, 2, 3, 4, 5, 6, 7, 8, 9];
-// break;
-// case 4:
-// break;
-// case 5:
-// break;
-// }

-// // var grammar = '#JSGF V1.0; grammar colors; public <color> = ' + arr.join(' | ') + ' ;';
-// // var speechRecognitionList = new SpeechGrammarList();
-// // speechRecognitionList.addFromString(grammar, 1);
-// // recognition.grammars = speechRecognitionList;
-// }
-recognition.start();
-console.log('reocgnition started. Question: ' + question);

-recognition.onresult = function (event) {
-var last = event.results.length - 1;
-var speechResult = event.results[last][0].transcript.toLowerCase();

-diagnosticPara.textContent += speechResult + ' ';

-// console.log('Confidence: ' + event.results[0][0].confidence)
-console.log('process: ' + speechResult);
-processSpeech(speechResult);
-// testBtn.disabled = false
-// testBtn.textContent = 'record...'
-};

-function processSpeech (speechResult) {
-console.log('To dialogflow: ' + speechResult);
-ws.send(speechResult);

-let timeOut;
-switch (question) {
-case 1:
-timeOut = 6500;
-break;
-case 2:
-answerQuery += speechResult;
-return;
-case 3:
-if (speechResult.includes('uhr')) {
-speechResult = speechResult.replace('uhr', '');
-}
-timeOut = 6500;
-break;
-case 4:
-break;
-case 5:
-timeOut = 6500;
-break;
-}

-if (state === 'answer') {
-if (timerId != undefined) {
-clearTimeout(timerId);
-}
-answerQuery += speechResult;
-timerId = window.setTimeout(
-function () {
-// if (!rePrompt) {
-// ws.send('ich brauche noch etwas Zeit')
-// } else {
-console.log('recording end. Evaluate: ' + answerQuery);
-handleAnswer(answerQuery);
-answerQuery = '';
-diagnosticPara.textContent = '';
-// }
-recognition.stop();
-console.log('timer fallback');
-}, timeOut);
-} else {
-console.log('recording end.');
-recognition.stop();
-}
 }

-recognition.onspeechend = function () {
-// recognition.stop();
-// testBtn.disabled = false;
-// testBtn.textContent = 'Start new test';
-};
+console.log(questionPoints[question]);

-recognition.onerror = function (event) {
-testBtn.disabled = false;
-testBtn.textContent = 'Start new test';
-diagnosticPara.textContent = 'Error occurred in recognition: ' + event.error;
-};
-recognition.onaudiostart = function (event) {
-// Fired when the user agent has started to capture audio.
+// speechsynth.text = 'test 123';
+// speechsynth.volume = 1;
+// speechsynth.rate = 1;
+// console.log(speechsynth);
+// window.speechSynthesis.speak(speechsynth);
+// console.log(window.speechSynthesis);

-};

-recognition.onaudioend = function (event) {

-};

-recognition.onend = function (event) {
-// Fired when the speech recognition service has disconnected.

-};

-recognition.onnomatch = function (event) {
-// Fired when the speech recognition service returns a final result with no significant recognition. This may involve some degree of recognition, which doesn't meet or exceed the confidence threshold.
-// console.log('SpeechRecognition.onnomatch')
-};

-recognition.onsoundstart = function (event) {
-// Fired when any sound — recognisable speech or not — has been detected.

-};

-recognition.onsoundend = function (event) {
-// Fired when any sound — recognisable speech or not — has stopped being detected.

-};

-recognition.onspeechstart = function (event) {
-// Fired when sound that is recognised by the speech recognition service as speech has been detected.

-};
-recognition.onstart = function (event) {
-// Fired when the speech recognition service has begun listening to incoming audio with intent to recognize grammars associated with the current SpeechRecognition.

-};
 }

-function calculatePoints (tokens, dict) {
+// function recognizeSpeech () {
+// if (state === 'answer') {
+// var arr;
+// switch (question) {
+// case 1:
+// arr = QUESTION_ONE_QUESTIONS;
+// break;
+// case 2:
+// // arr = QUESTION_TWO_QUESTIONS;
+// break;
+// case 3:
+// arr = [1, 2, 3, 4, 5, 6, 7, 8, 9];
+// break;
+// case 4:
+// break;
+// case 5:
+// break;
+// }

+// // var grammar = '#JSGF V1.0; grammar colors; public <color> = ' + arr.join(' | ') + ' ;';
+// // var speechRecognitionList = new SpeechGrammarList();
+// // speechRecognitionList.addFromString(grammar, 1);
+// // recognition.grammars = speechRecognitionList;
+// }

+recognition.onresult = function (event) {
+var last = event.results.length - 1;
+var speechResult = event.results[last][0].transcript.toLowerCase();

+diagnosticPara.textContent += speechResult + ' ';

+// console.log('Confidence: ' + event.results[0][0].confidence)
+console.log('process: ' + speechResult);
+processSpeech(speechResult);
+// testBtn.disabled = false
+// testBtn.textContent = 'record...'
+};

+function processSpeech (speechResult) {
+console.log('To dialogflow: ' + speechResult);
+ws.send(speechResult);

+let timeOut;
+switch (question) {
+case 1:
+timeOut = 6500;
+break;
+case 2:
+answerQuery += speechResult;
+return;
+case 3:
+if (speechResult.includes('uhr')) {
+speechResult = speechResult.replace('uhr', '');
+}
+timeOut = 6500;
+break;
+case 4:
+break;
+case 5:
+timeOut = 6500;
+break;
+}

+if (state === 'answer') {
+if (timerId != undefined) {
+clearTimeout(timerId);
+}
+answerQuery += speechResult;
+timerId = window.setTimeout(
+function () {
+// if (!rePrompt) {
+// ws.send('ich brauche noch etwas Zeit')
+// } else {
+console.log('recording end. Evaluate: ' + answerQuery);
+handleAnswer(answerQuery);
+answerQuery = '';
+diagnosticPara.textContent = '';
+// }
+recognition.stop();
+console.log('timer fallback');
+}, timeOut);
+} else {
+console.log('recording end.');
+recognition.stop();
+}
+}

+// #region speech recognition event
+recognition.onspeechend = function () {
+// recognition.stop();
+// testBtn.disabled = false;
+// testBtn.textContent = 'Start new test';
+};

+recognition.onerror = function (event) {
+testBtn.disabled = false;
+testBtn.textContent = 'Start new test';
+diagnosticPara.textContent = 'Error occurred in recognition: ' + event.error;
+};

+recognition.onaudiostart = function (event) {
+// Fired when the user agent has started to capture audio.

+};

+recognition.onaudioend = function (event) {

+};

+recognition.onend = function (event) {
+// Fired when the speech recognition service has disconnected.

+};

+recognition.onnomatch = function (event) {
+// Fired when the speech recognition service returns a final result with no significant recognition. This may involve some degree of recognition, which doesn't meet or exceed the confidence threshold.
+// console.log('SpeechRecognition.onnomatch')
+};

+recognition.onsoundstart = function (event) {
+// Fired when any sound — recognisable speech or not — has been detected.

+};

+recognition.onsoundend = function (event) {
+// Fired when any sound — recognisable speech or not — has stopped being detected.

+};

+recognition.onspeechstart = function (event) {
+// Fired when sound that is recognised by the speech recognition service as speech has been detected.

+};
+recognition.onstart = function (event) {
+// Fired when the speech recognition service has begun listening to incoming audio with intent to recognize grammars associated with the current SpeechRecognition.

+};
+// }
+// #endregion

+function calculatePoints (tokens, d) {
 let points = 0;
+let dict = {};
+Object.assign(dict, d);
 for (let word of tokens) {
 if (dict[word] !== undefined) {
 points += dict[word];
+delete dict[word];
 }
 }
 return points;
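Note on calculatePoints: the updated version works on a copy of the answer dictionary and deletes each matched word, so a word that occurs twice in the answer (for example 'apfel' in the new testSpeechOut query) is scored only once per call. A standalone sketch with illustrative values; the real QUESTION_ONE_ANSWERS lives in the repository and may differ:

    // Sketch only: each answer word scores at most once, because matched
    // entries are removed from a local copy of the dictionary.
    function calculatePoints (tokens, d) {
        let points = 0;
        let dict = {};
        Object.assign(dict, d);
        for (let word of tokens) {
            if (dict[word] !== undefined) {
                points += dict[word];
                delete dict[word];
            }
        }
        return points;
    }

    // Illustrative data, not taken from the repository:
    const QUESTION_ONE_ANSWERS = { apfel: 1, wiese: 1, tisch: 1, lampe: 1, pferd: 1 };
    console.log(calculatePoints('apfel wiese tisch apfel'.split(' '), QUESTION_ONE_ANSWERS)); // 3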