author     toasted-nutbread <toasted-nutbread@users.noreply.github.com>   2021-01-22 22:10:27 -0500
committer  GitHub <noreply@github.com>                                    2021-01-22 22:10:27 -0500
commit     7fbfef513d1336a883968f319c7001b4bb04876d (patch)
tree       df3098aee59805822d2e3028afad9df8d509104d /ext/mixed/js
parent     a51a591c404e365bc1fca657bb26c04d165f400b (diff)
Display audio update (#1291)
* Move createExpressionAudio to DisplayAudio
* Move createAudioFromInfo to DisplayAudio
* Update TextToSpeechAudio
Diffstat (limited to 'ext/mixed/js')
-rw-r--r--   ext/mixed/js/audio-system.js           | 45
-rw-r--r--   ext/mixed/js/display-audio.js          | 47
-rw-r--r--   ext/mixed/js/text-to-speech-audio.js   | 10
3 files changed, 52 insertions, 50 deletions
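
In effect, the per-expression audio lookup and its cache move out of the shared AudioSystem and into DisplayAudio: AudioSystem no longer takes a useCache flag or calls the backend api, and DisplayAudio now constructs its own CacheMap(32) and resolves sources itself. A hedged sketch of the call-site change (class and method names come from the diff below; the lookup arguments and the displayAudio instance are assumed):

// Sketch only, not part of the commit.
// Before #1291: AudioSystem resolved and cached expression audio itself.
async function createExpressionAudioBefore(sources, expression, reading, details) {
    const audioSystem = new AudioSystem(true); // true enabled an internal CacheMap(32)
    return await audioSystem.createExpressionAudio(sources, expression, reading, details);
}

// After #1291: DisplayAudio owns the cache and the backend lookup; AudioSystem
// only turns a resolved info entry into a playable audio object. DisplayAudio
// calls this privately from its play path; it is invoked directly here only to
// illustrate where the logic now lives.
async function createExpressionAudioAfter(displayAudio, sources, expression, reading, details) {
    return await displayAudio._createExpressionAudio(sources, expression, reading, details);
}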
diff --git a/ext/mixed/js/audio-system.js b/ext/mixed/js/audio-system.js
index ab6011d0..0933d90e 100644
--- a/ext/mixed/js/audio-system.js
+++ b/ext/mixed/js/audio-system.js
@@ -16,14 +16,11 @@
*/
/* global
- * CacheMap
* TextToSpeechAudio
- * api
*/
class AudioSystem {
- constructor(useCache) {
- this._cache = new CacheMap(useCache ? 32 : 0);
+ constructor() {
this._fallbackAudio = null;
}
@@ -36,35 +33,6 @@ class AudioSystem {
eventListeners.addEventListener(speechSynthesis, 'voiceschanged', onVoicesChanged, false);
}
- async createExpressionAudio(sources, expression, reading, details) {
- const key = JSON.stringify([expression, reading]);
-
- const cacheValue = this._cache.get(key);
- if (typeof cacheValue !== 'undefined') {
- return cacheValue;
- }
-
- for (let i = 0, ii = sources.length; i < ii; ++i) {
- const source = sources[i];
- const infoList = await await api.getExpressionAudioInfoList(source, expression, reading, details);
- for (let j = 0, jj = infoList.length; j < jj; ++j) {
- const info = infoList[j];
- let audio;
- try {
- audio = await this.createAudioFromInfo(info, source);
- } catch (e) {
- continue;
- }
-
- const result = {audio, source, infoList, infoListIndex: j};
- this._cache.set(key, result);
- return result;
- }
- }
-
- throw new Error('Could not create audio');
- }
-
getFallbackAudio() {
if (this._fallbackAudio === null) {
this._fallbackAudio = new Audio('/mixed/mp3/button.mp3');
@@ -94,17 +62,6 @@ class AudioSystem {
return new TextToSpeechAudio(text, voice);
}
- async createAudioFromInfo(info, source) {
- switch (info.type) {
- case 'url':
- return await this.createAudio(info.url, source);
- case 'tts':
- return this.createTextToSpeechAudio(info.text, info.voice);
- default:
- throw new Error(`Unsupported type: ${info.type}`);
- }
- }
-
// Private
_isAudioValid(audio, source) {
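
With createExpressionAudio and createAudioFromInfo removed, AudioSystem is left with fallback audio, direct URL audio, and text-to-speech creation. A minimal usage sketch, assuming the extension environment where AudioSystem and TextToSpeechAudio are loaded (the URL, source name, and voice choice are placeholders):

// Sketch only, not part of the commit.
const audioSystem = new AudioSystem();

// Bundled fallback click sound, created lazily on first use.
const fallbackAudio = audioSystem.getFallbackAudio();

(async () => {
    // createAudio is awaited elsewhere in this file, so it is treated as async here;
    // the URL and source name are placeholders, not real endpoints.
    const audio = await audioSystem.createAudio('https://example.com/yomu.mp3', 'custom');
    await audio.play();
})();

// Text-to-speech path; voice selection normally comes from the user's settings,
// and the first available voice is used here only for illustration.
const tts = audioSystem.createTextToSpeechAudio('読む', speechSynthesis.getVoices()[0]);
tts.play();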
diff --git a/ext/mixed/js/display-audio.js b/ext/mixed/js/display-audio.js
index 0cd8a625..cc7e9e93 100644
--- a/ext/mixed/js/display-audio.js
+++ b/ext/mixed/js/display-audio.js
@@ -17,16 +17,19 @@
/* global
* AudioSystem
+ * CacheMap
+ * api
*/
class DisplayAudio {
constructor(display) {
this._display = display;
this._audioPlaying = null;
- this._audioSystem = new AudioSystem(true);
+ this._audioSystem = new AudioSystem();
this._autoPlayAudioTimer = null;
this._autoPlayAudioDelay = 400;
this._eventListeners = new EventListenerCollection();
+ this._cache = new CacheMap(32);
}
get autoPlayAudioDelay() {
@@ -118,7 +121,7 @@ class DisplayAudio {
let info;
try {
let source;
- ({audio, source} = await this._audioSystem.createExpressionAudio(sources, expression, reading, {textToSpeechVoice, customSourceUrl}));
+ ({audio, source} = await this._createExpressionAudio(sources, expression, reading, {textToSpeechVoice, customSourceUrl}));
const sourceIndex = sources.indexOf(source);
info = `From source ${1 + sourceIndex}: ${source}`;
} catch (e) {
@@ -182,4 +185,44 @@ class DisplayAudio {
}
return results;
}
+
+ async _createExpressionAudio(sources, expression, reading, details) {
+ const key = JSON.stringify([expression, reading]);
+
+ const cacheValue = this._cache.get(key);
+ if (typeof cacheValue !== 'undefined') {
+ return cacheValue;
+ }
+
+ for (let i = 0, ii = sources.length; i < ii; ++i) {
+ const source = sources[i];
+ const infoList = await await api.getExpressionAudioInfoList(source, expression, reading, details);
+ for (let j = 0, jj = infoList.length; j < jj; ++j) {
+ const info = infoList[j];
+ let audio;
+ try {
+ audio = await this._createAudioFromInfo(info, source);
+ } catch (e) {
+ continue;
+ }
+
+ const result = {audio, source, infoList, infoListIndex: j};
+ this._cache.set(key, result);
+ return result;
+ }
+ }
+
+ throw new Error('Could not create audio');
+ }
+
+ async _createAudioFromInfo(info, source) {
+ switch (info.type) {
+ case 'url':
+ return await this._audioSystem.createAudio(info.url, source);
+ case 'tts':
+ return this._audioSystem.createTextToSpeechAudio(info.text, info.voice);
+ default:
+ throw new Error(`Unsupported type: ${info.type}`);
+ }
+ }
}
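
The cache key in _createExpressionAudio is the JSON-encoded [expression, reading] pair, so a repeated lookup for the same term and reading returns the first successful result without querying any source again, while a failure for one info entry or source simply falls through to the next. A standalone illustration of that keying, with a plain Map standing in for the extension's CacheMap and made-up values:

// Illustration only, not extension code.
const cache = new Map();

function getOrCreate(expression, reading, create) {
    const key = JSON.stringify([expression, reading]);
    const cached = cache.get(key);
    if (typeof cached !== 'undefined') { return cached; }
    const value = create();
    cache.set(key, value);
    return value;
}

// The first call populates the entry; the second reuses it without calling create().
getOrCreate('行く', 'いく', () => ({source: 'example-source-1'}));
getOrCreate('行く', 'いく', () => ({source: 'never-evaluated'}));

// A different reading produces a different key, so it is resolved separately.
getOrCreate('行く', 'ゆく', () => ({source: 'example-source-2'}));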
diff --git a/ext/mixed/js/text-to-speech-audio.js b/ext/mixed/js/text-to-speech-audio.js
index 69244514..a32916f4 100644
--- a/ext/mixed/js/text-to-speech-audio.js
+++ b/ext/mixed/js/text-to-speech-audio.js
@@ -17,8 +17,8 @@
class TextToSpeechAudio {
constructor(text, voice) {
- this.text = text;
- this.voice = voice;
+ this._text = text;
+ this._voice = voice;
this._utterance = null;
this._volume = 1;
}
@@ -26,6 +26,7 @@ class TextToSpeechAudio {
get currentTime() {
return 0;
}
+
set currentTime(value) {
// NOP
}
@@ -33,6 +34,7 @@ class TextToSpeechAudio {
get volume() {
return this._volume;
}
+
set volume(value) {
this._volume = value;
if (this._utterance !== null) {
@@ -43,10 +45,10 @@ class TextToSpeechAudio {
async play() {
try {
if (this._utterance === null) {
- this._utterance = new SpeechSynthesisUtterance(this.text || '');
+ this._utterance = new SpeechSynthesisUtterance(typeof this._text === 'string' ? this._text : '');
this._utterance.lang = 'ja-JP';
this._utterance.volume = this._volume;
- this._utterance.voice = this.voice;
+ this._utterance.voice = this._voice;
}
speechSynthesis.cancel();