author     toasted-nutbread <toasted-nutbread@users.noreply.github.com>  2020-03-07 13:27:44 -0500
committer  toasted-nutbread <toasted-nutbread@users.noreply.github.com>  2020-03-07 21:42:19 -0500
commit     75eac153d625c54892a6f7194d0cfa4160ffe722 (patch)
tree       9d7855e61a3e47cfdae553a9c2657c078391470d
parent     d8e2e69ca5ac7afff3cc385cc0cd18852c8d850b (diff)
Remove old APIs
-rw-r--r--  ext/mixed/js/audio.js  110
1 file changed, 0 insertions, 110 deletions
diff --git a/ext/mixed/js/audio.js b/ext/mixed/js/audio.js
index 6a338cca..d2feae04 100644
--- a/ext/mixed/js/audio.js
+++ b/ext/mixed/js/audio.js
@@ -66,21 +66,6 @@ class TextToSpeechAudio {
             // NOP
         }
     }
-
-    static createFromUri(ttsUri) {
-        const m = /^tts:[^#?]*\?([^#]*)/.exec(ttsUri);
-        if (m === null) { return null; }
-
-        const searchParameters = new URLSearchParams(m[1]);
-        const text = searchParameters.get('text');
-        let voice = searchParameters.get('voice');
-        if (text === null || voice === null) { return null; }
-
-        voice = audioGetTextToSpeechVoice(voice);
-        if (voice === null) { return null; }
-
-        return new TextToSpeechAudio(text, voice);
-    }
 }
 
 class AudioSystem {
@@ -199,98 +184,3 @@ class AudioSystem {
         }
     }
 }
-
-
-function audioGetFromUrl(url, willDownload) {
-    const tts = TextToSpeechAudio.createFromUri(url);
-    if (tts !== null) {
-        if (willDownload) {
-            throw new Error('AnkiConnect does not support downloading text-to-speech audio.');
-        }
-        return Promise.resolve(tts);
-    }
-
-    return new Promise((resolve, reject) => {
-        const audio = new Audio(url);
-        audio.addEventListener('loadeddata', () => {
-            if (audio.duration === 5.694694 || audio.duration === 5.720718) {
-                // Hardcoded values for invalid audio
-                reject(new Error('Could not retrieve audio'));
-            } else {
-                resolve(audio);
-            }
-        });
-        audio.addEventListener('error', () => reject(audio.error));
-    });
-}
-
-async function audioGetFromSources(expression, sources, optionsContext, willDownload, cache=null) {
-    const key = `${expression.expression}:${expression.reading}`;
-    if (cache !== null) {
-        const cacheValue = cache.get(expression);
-        if (typeof cacheValue !== 'undefined') {
-            return cacheValue;
-        }
-    }
-
-    for (let i = 0, ii = sources.length; i < ii; ++i) {
-        const source = sources[i];
-        const url = await apiAudioGetUrl(expression, source, optionsContext);
-        if (url === null) {
-            continue;
-        }
-
-        try {
-            let audio = await audioGetFromUrl(url, willDownload);
-            if (willDownload) {
-                // AnkiConnect handles downloading URLs into cards
-                audio = null;
-            }
-            const result = {audio, url, source};
-            if (cache !== null) {
-                cache.set(key, result);
-            }
-            return result;
-        } catch (e) {
-            // NOP
-        }
-    }
-    return {audio: null, url: null, source: null};
-}
-
-function audioGetTextToSpeechVoice(voiceURI) {
-    try {
-        for (const voice of speechSynthesis.getVoices()) {
-            if (voice.voiceURI === voiceURI) {
-                return voice;
-            }
-        }
-    } catch (e) {
-        // NOP
-    }
-    return null;
-}
-
-function audioPrepareTextToSpeech(options) {
-    if (
-        audioPrepareTextToSpeech.state ||
-        !options.audio.textToSpeechVoice ||
-        !(
-            options.audio.sources.includes('text-to-speech') ||
-            options.audio.sources.includes('text-to-speech-reading')
-        )
-    ) {
-        // Text-to-speech not in use.
-        return;
-    }
-
-    // Chrome needs this value called once before it will become populated.
-    // The first call will return an empty list.
-    audioPrepareTextToSpeech.state = true;
-    try {
-        speechSynthesis.getVoices();
-    } catch (e) {
-        // NOP
-    }
-}
-audioPrepareTextToSpeech.state = false;
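
Context for the removed audioPrepareTextToSpeech: Chrome populates the result of speechSynthesis.getVoices() asynchronously, so the first call returns an empty list, and a warm-up call was needed before audioGetTextToSpeechVoice could look a voice up by its voiceURI. The sketch below illustrates that Web Speech API behavior; it is not part of this commit, and the waitForSpeechSynthesisVoices helper name and the voiceURI value are hypothetical.

function waitForSpeechSynthesisVoices() {
    return new Promise((resolve) => {
        // If the voice list is already populated, resolve immediately.
        const voices = speechSynthesis.getVoices();
        if (voices.length > 0) {
            resolve(voices);
            return;
        }
        // Otherwise wait for Chrome to populate the list; the getVoices()
        // call above triggers the asynchronous load, and 'voiceschanged'
        // fires once the voices are available.
        speechSynthesis.addEventListener('voiceschanged', () => {
            resolve(speechSynthesis.getVoices());
        }, {once: true});
    });
}

// Example: find a voice by voiceURI, as the removed audioGetTextToSpeechVoice
// did synchronously after the warm-up call. The voiceURI is illustrative.
waitForSpeechSynthesisVoices().then((voices) => {
    const voice = voices.find((v) => v.voiceURI === 'Google 日本語');
    console.log(voice ? voice.name : 'voice not found');
});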