@@ -87,7 +87,7 @@ var diagnosticPara = document.querySelector('.output');
var testBtn = document.querySelector('button');
var testBtn2 = document.getElementById('speechBtn');
var infoPara = document.getElementById('info');
var userPrompt = document.getElementById('query');
var questionNumDisplay = document.querySelector('.quest');
// #endregion

// websocket to communicate with the server
@@ -154,7 +154,7 @@ ws.onopen = function () {
ws.onmessage = function (payload) {
    var dialogflowResult = JSON.parse(payload.data);
    checkIntent(dialogflowResult);
    document.querySelector('h1').innerHTML = dialogflowResult.intent.displayName;
    // document.querySelector('h1').innerHTML = dialogflowResult.intent.displayName;
};
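// Note: the handler above assumes the server relays Dialogflow's QueryResult as JSON.
// A minimal sketch of the payload shape it relies on (field values are illustrative):
//
//   {
//     "queryText": "starte demenz test",
//     "intent": { "displayName": "start_demenz_screening" },
//     "fulfillmentText": "..."
//   }
//
// so JSON.parse(payload.data).intent.displayName yields the matched intent's name.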
// #endregion

@@ -215,6 +215,7 @@ function checkIntent (result) {
function startQuestion (number) {
    question = number;
    state = 'answer';
    questionNumDisplay.textContent = 'Question: ' + question;
    handleQuestion();
}
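// Usage sketch (hypothetical call site inside checkIntent; the intent name is an assumption):
//
//   if (result.intent.displayName === 'frage zwei') {
//     startQuestion(2); // question = 2, state = 'answer', display updated, handleQuestion() runs
//   }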
@@ -263,8 +264,11 @@ function readQuestionTwo () {
    window.setTimeout(
        function () {
            recognition.stop();
            console.log('recognition stopped');
            handleAnswer(answerQuery);
            window.setTimeout(
                function () {
                    handleAnswer(answerQuery);
                    answerQuery = '';
                }, 3000);
        }, 6000);
    recognition.start();
    console.log('recognition started. Question: ' + question);
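// Timing sketch for the block above: recognition listens for 6 s while the user answers;
// the outer timeout then stops recognition and scores the transcript collected so far,
// and the nested 3 s timeout calls handleAnswer again (presumably to catch a transcript
// that arrives after recognition.stop()) before clearing answerQuery for the next question.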
@@ -369,39 +373,6 @@ function handleAnswerToThirdQuestion (query) {
}
// #endregion

// #region global functions
function startDemenzScreening () {
    ws.send('starte demenz test');
    // startQuestion(2);
    testBtn.disabled = true;
    testBtn.textContent = 'Test in progress';
    infoPara.textContent = 'wait...';
    diagnosticPara.textContent = 'detecting...';
}
function speak (sentence) {
    speechsynth.text = sentence;
    window.speechSynthesis.speak(speechsynth);
}
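// Usage sketch: speak('hallo') sets the text on the shared SpeechSynthesisUtterance
// (speechsynth, assumed to be defined elsewhere in this file) and queues it on the
// browser's speech synthesis engine; successive calls are spoken in order.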
function testSpeechOut () {
    answerQuery = 'apfel wiese tisch apfel lampe pferd';
    question = 1;
    for (let i = 0; i < 2; i++) {
        var tokens = answerQuery.split(new RegExp(separators.join('|'), 'g'));
        questionPoints[question] += calculatePoints(tokens, QUESTION_ONE_ANSWERS);
    }

    console.log(questionPoints[question]);

    // speechsynth.text = 'test 123';
    // speechsynth.volume = 1;
    // speechsynth.rate = 1;
    // console.log(speechsynth);
    // window.speechSynthesis.speak(speechsynth);
    // console.log(window.speechSynthesis);
}
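// What the test above checks, assuming separators is an array of delimiter patterns
// such as [' ']: the sample answer splits into
// ['apfel', 'wiese', 'tisch', 'apfel', 'lampe', 'pferd'], calculatePoints() compares
// the tokens against QUESTION_ONE_ANSWERS, and because the loop runs twice
// questionPoints[1] ends up holding twice the single-pass score.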
// function recognizeSpeech () {
// if (state === 'answer') {
// var arr;
@@ -427,6 +398,7 @@ function testSpeechOut () {
// // recognition.grammars = speechRecognitionList;
// }

// #region speech recognition event
recognition.onresult = function (event) {
    var last = event.results.length - 1;
    var speechResult = event.results[last][0].transcript.toLowerCase();
@@ -439,7 +411,60 @@ recognition.onresult = function (event) {
    // testBtn.disabled = false
    // testBtn.textContent = 'record...'
};
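// In the Web Speech API, event.results is a SpeechRecognitionResultList;
// event.results[last][0] is the top alternative of the most recent result, and its
// .transcript (lower-cased above) is the raw text the handler works with. A
// .confidence score between 0 and 1 is also available on the same alternative.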
recognition.onspeechend = function () {
    // recognition.stop();
    // testBtn.disabled = false;
    // testBtn.textContent = 'Start new test';
};

recognition.onerror = function (event) {
    testBtn.disabled = false;
    testBtn.textContent = 'Start new test';
    diagnosticPara.textContent = 'Error occurred in recognition: ' + event.error;
};

recognition.onaudiostart = function (event) {
    // Fired when the user agent has started to capture audio.
};

recognition.onaudioend = function (event) {
};

recognition.onend = function (event) {
    // Fired when the speech recognition service has disconnected.
};

recognition.onnomatch = function (event) {
    // Fired when the speech recognition service returns a final result with no significant recognition. This may involve some degree of recognition, which doesn't meet or exceed the confidence threshold.
    // console.log('SpeechRecognition.onnomatch')
};

recognition.onsoundstart = function (event) {
    // Fired when any sound - recognisable speech or not - has been detected.
};

recognition.onsoundend = function (event) {
    // Fired when any sound - recognisable speech or not - has stopped being detected.
};

recognition.onspeechstart = function (event) {
    // Fired when sound that is recognised by the speech recognition service as speech has been detected.
};

recognition.onstart = function (event) {
    // Fired when the speech recognition service has begun listening to incoming audio with intent to recognize grammars associated with the current SpeechRecognition.
};
// }
// #endregion
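// For reference, a single recognition pass typically fires these events in order:
// onstart -> onaudiostart -> onsoundstart -> onspeechstart -> onresult (one or more)
// -> onspeechend -> onsoundend -> onaudioend -> onend. onerror fires when recognition
// fails, and onnomatch when nothing is recognised with sufficient confidence.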
// #region global functions
function processSpeech (speechResult) {
    console.log('To dialogflow: ' + speechResult);
    ws.send(speechResult);
@@ -489,58 +514,37 @@ function processSpeech (speechResult) {
    }
}
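// processSpeech forwards the recognised transcript to the server over the same
// WebSocket; judging by the log message it is then passed to Dialogflow, and the
// detected intent comes back through ws.onmessage above.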
// #region speech recognition event
recognition.onspeechend = function () {
    // recognition.stop();
    // testBtn.disabled = false;
    // testBtn.textContent = 'Start new test';
};

recognition.onerror = function (event) {
    testBtn.disabled = false;
    testBtn.textContent = 'Start new test';
    diagnosticPara.textContent = 'Error occurred in recognition: ' + event.error;
};

recognition.onaudiostart = function (event) {
    // Fired when the user agent has started to capture audio.
};

recognition.onaudioend = function (event) {
};

recognition.onend = function (event) {
    // Fired when the speech recognition service has disconnected.
};

recognition.onnomatch = function (event) {
    // Fired when the speech recognition service returns a final result with no significant recognition. This may involve some degree of recognition, which doesn't meet or exceed the confidence threshold.
    // console.log('SpeechRecognition.onnomatch')
};

recognition.onsoundstart = function (event) {
    // Fired when any sound - recognisable speech or not - has been detected.
};

recognition.onsoundend = function (event) {
    // Fired when any sound - recognisable speech or not - has stopped being detected.
function startDemenzScreening () {
    // ws.send('starte demenz test');
    startQuestion(2);
    testBtn.disabled = true;
    testBtn.textContent = 'Test in progress';
    infoPara.textContent = 'wait...';
    diagnosticPara.textContent = 'detecting...';
}

};
function testSpeechOut () {
    answerQuery = 'apfel wiese tisch apfel lampe pferd';
    question = 1;
    for (let i = 0; i < 2; i++) {
        var tokens = answerQuery.split(new RegExp(separators.join('|'), 'g'));
        questionPoints[question] += calculatePoints(tokens, QUESTION_ONE_ANSWERS);
    }

recognition.onspeechstart = function (event) {
    // Fired when sound that is recognised by the speech recognition service as speech has been detected.
    console.log(questionPoints[question]);

};
recognition.onstart = function (event) {
    // Fired when the speech recognition service has begun listening to incoming audio with intent to recognize grammars associated with the current SpeechRecognition.
    // speechsynth.text = 'test 123';
    // speechsynth.volume = 1;
    // speechsynth.rate = 1;
    // console.log(speechsynth);
    // window.speechSynthesis.speak(speechsynth);
    // console.log(window.speechSynthesis);
}

};
// }
// #endregion
function speak (sentence) {
    speechsynth.text = sentence;
    window.speechSynthesis.speak(speechsynth);
}

function calculatePoints (tokens, d) {
    let points = 0;