|
|
|
|
|
|
|
|
// #endregion

// #region points
const questionPoints = {
  1: 0,
  2: 0,
  3: 0,
  4: 0,
  5: 0
};
// #endregion
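// The answer handlers below read and update these scores via
// questionPoints[question], where question is the global question counter.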
|
|
|
|
|
|
|
|
// tokenization
const separators = [' ', '\\+', '-', '\\(', '\\)', '\\*', '/', ':', '\\?'];
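// Joined into a single alternation for tokenizing answers, e.g.
//   'apfel wiese tisch'.split(new RegExp(separators.join('|'), 'g'))
// yields ['apfel', 'wiese', 'tisch'].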
|
|
|
|
|
|
|
|
      break;
  }

  if (!skipRecording) {
    recognizeSpeech();
  }
  skipRecording = false;
  diagnosticPara.textContent = '';
  console.log('global speech end');
};
// #endregion

    window.speechSynthesis.speak(utterance);
    if (i === 9) {
      utterance.onend = function (event) {
        recognizeSpeech();
      };
    }
  }
}

  window.setTimeout(
    function () {
      recognition.stop();
      console.log('recognition stopped');
      handleAnswer(answerQuery);
    }, 60000);
  recognizeSpeech();
};
}
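// The 60 s timeout above is a hard stop: if recognition never goes quiet on
// its own, it is stopped and whatever has accumulated in answerQuery is
// evaluated.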
|
|
|
|
|
|
|
|
function readQuestionThree () {
  skipRecording = true;
  speak('Dankeschön, weiter geht es mit der nächsten Frage.');

  let utterance = new SpeechSynthesisUtterance();
  utterance.voice = voices[2];
  utterance.text = QUESTION_THREE;
  window.speechSynthesis.speak(utterance);

  utterance.onend = function (event) {
    console.log('speech end');
    speak(QUESTION_THREE_QUESTIONS_PT1[questionThreeCount]);
  };

  utterance.onerror = function (event) {
    // minimal handler (assumed body): log the error reported by speech synthesis
    console.error('speech synthesis error: ' + event.error);
  };
}

function handleAnswerToFirstQuestion (answer) {
  var tokens = answer.split(new RegExp(separators.join('|'), 'g'));
  questionPoints[question] += calculatePoints(tokens, QUESTION_ONE_ANSWERS);

  if (partTwo) {
    partTwo = false;
    console.log('question 1 points: ' + questionPoints[question]);
    skipRecording = true;
    speak('Vielen Dank, nun geht es weiter mit der nächsten Frage');
    startQuestion(2);
  }
}

function handleAnswerToSecondQuestion (answer) {
  var tokens = answer.split(new RegExp(separators.join('|'), 'g'));
  questionPoints[question] = calculatePoints(tokens, QUESTION_TWO_ANSWERS);
  console.log('question 2 points: ' + questionPoints[question]);
  startQuestion(3);
  // state = 'detect'
}
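// Unlike question one (which accumulates with +=), the whole answer arrives
// here as a single query, so the score is assigned directly.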
|
|
|
|
|
|
|
|
    strike = 0;
    partTwo = false;
    questionThreeCount++;
    questionPoints[question] = questionThreeCount + 1;
    questionArray = QUESTION_THREE_QUESTIONS_PT1;
  } else {
    strike++;
  }

  // two failed sub-questions in a row, or all five asked: move on
  if (strike === 2 || questionThreeCount === 5) {
    speechsynth.rate = 1;
    console.log('question 3 points: ' + questionPoints[question]);
    skipRecording = true;
    speak('weiter geht es mit der nächsten Frage');
    startQuestion(4);
  } else {
    speak(questionArray[questionThreeCount]);
  }
  console.log('count: ' + questionThreeCount + ', strike: ' + strike + ', points: ' + questionPoints[question]);
}
// #endregion

// #region global functions
function startDemenzScreening () {
  ws.send('starte demenz test');
  // startQuestion(2);
  testBtn.disabled = true;
  testBtn.textContent = 'Test in progress';
  infoPara.textContent = 'wait...';
}

function testSpeechOut () {
  console.log('click');

  answerQuery = 'apfel wiese tisch apfel lampe pferd';
  question = 1;
  for (let i = 0; i < 2; i++) {
    var tokens = answerQuery.split(new RegExp(separators.join('|'), 'g'));
    questionPoints[question] += calculatePoints(tokens, QUESTION_ONE_ANSWERS);
  }
  console.log(questionPoints[question]);

  // speechsynth.text = 'test 123';
  // speechsynth.volume = 1;
  // speechsynth.rate = 1;
  // console.log(speechsynth);
  // window.speechSynthesis.speak(speechsynth);
  // console.log(window.speechSynthesis);
}
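// The loop scores the same answer twice: calculatePoints copies the answer
// dictionary per call, so each pass counts a word once and the logged total
// is double the single-pass score.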
|
|
|
|
|
|
|
|
function recognizeSpeech () {
  // if (state === 'answer') {
  //   var arr;
  //   switch (question) {
  //     case 1:
  //       arr = QUESTION_ONE_QUESTIONS;
  //       break;
  //     case 2:
  //       // arr = QUESTION_TWO_QUESTIONS;
  //       break;
  //     case 3:
  //       arr = [1, 2, 3, 4, 5, 6, 7, 8, 9];
  //       break;
  //     case 4:
  //       break;
  //     case 5:
  //       break;
  //   }
  //   // var grammar = '#JSGF V1.0; grammar colors; public <color> = ' + arr.join(' | ') + ' ;';
  //   // var speechRecognitionList = new SpeechGrammarList();
  //   // speechRecognitionList.addFromString(grammar, 1);
  //   // recognition.grammars = speechRecognitionList;
  // }

  recognition.start();
  console.log('recognition started. Question: ' + question);

  recognition.onresult = function (event) {
    var last = event.results.length - 1;
    var speechResult = event.results[last][0].transcript.toLowerCase();
    diagnosticPara.textContent += speechResult + ' ';
    // console.log('Confidence: ' + event.results[0][0].confidence)
    console.log('process: ' + speechResult);
    processSpeech(speechResult);
    // testBtn.disabled = false
    // testBtn.textContent = 'record...'
  };
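  // Note: event.results is cumulative for the session, so the handler above
  // only reads the newest result (index event.results.length - 1) and takes
  // its top alternative's transcript.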
|
|
|
|
|
|
|
|
  function processSpeech (speechResult) {
    console.log('To dialogflow: ' + speechResult);
    ws.send(speechResult);

    if (state === 'answer') {
      if (timerId != undefined) {
        clearTimeout(timerId);
      }

      let timeOut;
      switch (question) {
        case 1:
          timeOut = 6500;
          break;
        case 2:
          // question two accumulates the whole answer; it is evaluated by the
          // longer fallback timer instead of the per-result timeout below
          answerQuery += speechResult;
          return;
        case 3:
          if (speechResult.includes('uhr')) {
            speechResult = speechResult.replace('uhr', '');
          }
          timeOut = 6500;
          break;
        case 4:
          break;
        case 5:
          timeOut = 6500;
          break;
      }

      answerQuery += speechResult;
      timerId = window.setTimeout(
        function () {
          // if (!rePrompt) {
          //   ws.send('ich brauche noch etwas Zeit')
          // } else {
          console.log('recording end. Evaluate: ' + answerQuery);
          handleAnswer(answerQuery);
          answerQuery = '';
          diagnosticPara.textContent = '';
          // }
          recognition.stop();
          console.log('timer fallback');
        }, timeOut);
    } else {
      console.log('recording end.');
      recognition.stop();
    }
  }
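  // Each new result resets timerId above, so handleAnswer only fires after
  // ~6.5 s of silence. The same debounce pattern in isolation (sketch only;
  // timerId, buffer, onChunk and evaluate are illustrative names, not part
  // of this file):
  //
  //   let timerId, buffer = '';
  //   function onChunk (chunk) {
  //     clearTimeout(timerId);
  //     buffer += chunk;
  //     timerId = setTimeout(function () { evaluate(buffer); }, 6500);
  //   }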
|
|
|
|
|
|
|
|
  // #region speech recognition events
  recognition.onspeechend = function () {
    // recognition.stop();
    // testBtn.disabled = false;
    // testBtn.textContent = 'Start new test';
  };

  recognition.onerror = function (event) {
    testBtn.disabled = false;
    testBtn.textContent = 'Start new test';
    diagnosticPara.textContent = 'Error occurred in recognition: ' + event.error;
  };

  recognition.onaudiostart = function (event) {
    // Fired when the user agent has started to capture audio.
  };

  recognition.onaudioend = function (event) {
  };

  recognition.onend = function (event) {
    // Fired when the speech recognition service has disconnected.
  };

  recognition.onnomatch = function (event) {
    // Fired when the speech recognition service returns a final result with
    // no significant recognition. This may involve some degree of recognition,
    // which doesn't meet or exceed the confidence threshold.
    // console.log('SpeechRecognition.onnomatch')
  };

  recognition.onsoundstart = function (event) {
    // Fired when any sound — recognisable speech or not — has been detected.
  };

  recognition.onsoundend = function (event) {
    // Fired when any sound — recognisable speech or not — has stopped being detected.
  };

  recognition.onspeechstart = function (event) {
    // Fired when sound that is recognised by the speech recognition service
    // as speech has been detected.
  };

  recognition.onstart = function (event) {
    // Fired when the speech recognition service has begun listening to
    // incoming audio with intent to recognize grammars associated with the
    // current SpeechRecognition.
  };
  // #endregion
}

function calculatePoints (tokens, d) {
  let points = 0;
  // work on a copy so that deleting counted words does not mutate the
  // caller's answer dictionary, and duplicate words score only once
  let dict = {};
  Object.assign(dict, d);
  for (let word of tokens) {
    if (dict[word] !== undefined) {
      points += dict[word];
      delete dict[word];
    }
  }
  return points;
}
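// Example with a hypothetical dictionary (the real ones are the
// QUESTION_*_ANSWERS maps used above):
//   calculatePoints(['apfel', 'wiese', 'apfel'], { apfel: 1, wiese: 1 })
// returns 2; the repeated 'apfel' is only counted once.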