Diffstat (limited to 'ext/mixed/js')
 -rw-r--r--  ext/mixed/js/api.js               |   8
 -rw-r--r--  ext/mixed/js/audio-system.js      | 185
 -rw-r--r--  ext/mixed/js/audio.js             | 178
 -rw-r--r--  ext/mixed/js/core.js              |  28
 -rw-r--r--  ext/mixed/js/display-generator.js |   7
 -rw-r--r--  ext/mixed/js/display.js           | 284
 -rw-r--r--  ext/mixed/js/scroll.js            |   2
 -rw-r--r--  ext/mixed/js/text-scanner.js      |   6
 8 files changed, 359 insertions(+), 339 deletions(-)
diff --git a/ext/mixed/js/api.js b/ext/mixed/js/api.js
index 7ea68d59..0ab07039 100644
--- a/ext/mixed/js/api.js
+++ b/ext/mixed/js/api.js
@@ -69,8 +69,8 @@ function apiTemplateRender(template, data) {
return _apiInvoke('templateRender', {data, template});
}
-function apiAudioGetUrl(definition, source, optionsContext) {
- return _apiInvoke('audioGetUrl', {definition, source, optionsContext});
+function apiAudioGetUri(definition, source, optionsContext) {
+ return _apiInvoke('audioGetUri', {definition, source, optionsContext});
}
function apiCommandExec(command, params) {
@@ -117,6 +117,10 @@ function apiGetMessageToken() {
return _apiInvoke('getMessageToken');
}
+function apiGetDefaultAnkiFieldTemplates() {
+ return _apiInvoke('getDefaultAnkiFieldTemplates');
+}
+
function _apiInvoke(action, params={}) {
const data = {action, params};
return new Promise((resolve, reject) => {
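Both api.js helpers touched here are thin promise-returning wrappers around _apiInvoke, so call sites simply await them. A minimal caller-side sketch, assuming a definition object and an optionsContext are already in scope; 'jpod101' is used only as a placeholder source name:

    // Hypothetical call sites; the argument values are placeholders.
    async function example(definition, optionsContext) {
        // Resolves to an audio URI, or null if the source has no audio.
        const uri = await apiAudioGetUri(definition, 'jpod101', optionsContext);

        // Resolves to the extension's default Anki field templates.
        const defaultTemplates = await apiGetDefaultAnkiFieldTemplates();

        return {uri, defaultTemplates};
    }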
diff --git a/ext/mixed/js/audio-system.js b/ext/mixed/js/audio-system.js
new file mode 100644
index 00000000..31c476b1
--- /dev/null
+++ b/ext/mixed/js/audio-system.js
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2019-2020 Alex Yatskov <alex@foosoft.net>
+ * Author: Alex Yatskov <alex@foosoft.net>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+class TextToSpeechAudio {
+ constructor(text, voice) {
+ this.text = text;
+ this.voice = voice;
+ this._utterance = null;
+ this._volume = 1;
+ }
+
+ get currentTime() {
+ return 0;
+ }
+ set currentTime(value) {
+ // NOP
+ }
+
+ get volume() {
+ return this._volume;
+ }
+ set volume(value) {
+ this._volume = value;
+ if (this._utterance !== null) {
+ this._utterance.volume = value;
+ }
+ }
+
+ play() {
+ try {
+ if (this._utterance === null) {
+ this._utterance = new SpeechSynthesisUtterance(this.text || '');
+ this._utterance.lang = 'ja-JP';
+ this._utterance.volume = this._volume;
+ this._utterance.voice = this.voice;
+ }
+
+ speechSynthesis.cancel();
+ speechSynthesis.speak(this._utterance);
+ } catch (e) {
+ // NOP
+ }
+ }
+
+ pause() {
+ try {
+ speechSynthesis.cancel();
+ } catch (e) {
+ // NOP
+ }
+ }
+}
+
+class AudioSystem {
+ constructor({getAudioUri}) {
+ this._cache = new Map();
+ this._cacheSizeMaximum = 32;
+ this._getAudioUri = getAudioUri;
+
+ if (typeof speechSynthesis !== 'undefined') {
+ // speechSynthesis.getVoices() will not be populated unless some API call is made.
+ speechSynthesis.addEventListener('voiceschanged', this._onVoicesChanged.bind(this));
+ }
+ }
+
+ async getDefinitionAudio(definition, sources, details) {
+ const key = `${definition.expression}:${definition.reading}`;
+ const cacheValue = this._cache.get(key);
+ if (typeof cacheValue !== 'undefined') {
+ const {audio, uri, source} = cacheValue;
+ return {audio, uri, source};
+ }
+
+ for (const source of sources) {
+ const uri = await this._getAudioUri(definition, source, details);
+ if (uri === null) { continue; }
+
+ try {
+ const audio = await this._createAudio(uri, details);
+ this._cacheCheck();
+ this._cache.set(key, {audio, uri, source});
+ return {audio, uri, source};
+ } catch (e) {
+ // NOP
+ }
+ }
+
+ throw new Error('Could not create audio');
+ }
+
+ createTextToSpeechAudio({text, voiceUri}) {
+ const voice = this._getTextToSpeechVoiceFromVoiceUri(voiceUri);
+ if (voice === null) {
+ throw new Error('Invalid text-to-speech voice');
+ }
+ return new TextToSpeechAudio(text, voice);
+ }
+
+ _onVoicesChanged() {
+ // NOP
+ }
+
+ async _createAudio(uri, details) {
+ const ttsParameters = this._getTextToSpeechParameters(uri);
+ if (ttsParameters !== null) {
+ if (typeof details === 'object' && details !== null) {
+ if (details.tts === false) {
+ throw new Error('Text-to-speech not permitted');
+ }
+ }
+ return this.createTextToSpeechAudio(ttsParameters);
+ }
+
+ return await this._createAudioFromUrl(uri);
+ }
+
+ _createAudioFromUrl(url) {
+ return new Promise((resolve, reject) => {
+ const audio = new Audio(url);
+ audio.addEventListener('loadeddata', () => {
+ const duration = audio.duration;
+ if (duration === 5.694694 || duration === 5.720718) {
+ // Hardcoded values for invalid audio
+ reject(new Error('Could not retrieve audio'));
+ } else {
+ resolve(audio);
+ }
+ });
+ audio.addEventListener('error', () => reject(audio.error));
+ });
+ }
+
+ _getTextToSpeechVoiceFromVoiceUri(voiceUri) {
+ try {
+ for (const voice of speechSynthesis.getVoices()) {
+ if (voice.voiceURI === voiceUri) {
+ return voice;
+ }
+ }
+ } catch (e) {
+ // NOP
+ }
+ return null;
+ }
+
+ _getTextToSpeechParameters(uri) {
+ const m = /^tts:[^#?]*\?([^#]*)/.exec(uri);
+ if (m === null) { return null; }
+
+ const searchParameters = new URLSearchParams(m[1]);
+ const text = searchParameters.get('text');
+ const voiceUri = searchParameters.get('voice');
+ return (text !== null && voiceUri !== null ? {text, voiceUri} : null);
+ }
+
+ _cacheCheck() {
+ const removeCount = this._cache.size - this._cacheSizeMaximum;
+ if (removeCount <= 0) { return; }
+
+ const removeKeys = [];
+ for (const key of this._cache.keys()) {
+ removeKeys.push(key);
+ if (removeKeys.length >= removeCount) { break; }
+ }
+
+ for (const key of removeKeys) {
+ this._cache.delete(key);
+ }
+ }
+}
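AudioSystem separates URI resolution from playback: the constructor receives a getAudioUri callback, and getDefinitionAudio walks the given sources in order, caching the first result it can turn into a playable object (an Audio element, or a TextToSpeechAudio for tts: URIs). A usage sketch in which resolveUri is a hypothetical stand-in for the embedder's resolver (display.js below passes a wrapper around apiAudioGetUri), and the source list is a placeholder:

    // `resolveUri` is a hypothetical stand-in for the embedder's resolver.
    const audioSystem = new AudioSystem({
        getAudioUri: (definition, source, details) => resolveUri(definition, source, details)
    });

    async function playDefinitionAudio(definition) {
        try {
            // Tries each source in order; throws if none yields playable audio.
            const {audio, uri, source} = await audioSystem.getDefinitionAudio(
                definition,                      // {expression, reading, ...}
                ['jpod101', 'text-to-speech'],   // placeholder source list
                {tts: true}                      // tts: false would forbid text-to-speech URIs
            );
            audio.play();
            return {uri, source};
        } catch (e) {
            // Fall back to a default sound, as Display.audioPlay does below.
            return null;
        }
    }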
diff --git a/ext/mixed/js/audio.js b/ext/mixed/js/audio.js
deleted file mode 100644
index b5a025be..00000000
--- a/ext/mixed/js/audio.js
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Copyright (C) 2019-2020 Alex Yatskov <alex@foosoft.net>
- * Author: Alex Yatskov <alex@foosoft.net>
- *
- * This program is free software: you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation, either version 3 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <https://www.gnu.org/licenses/>.
- */
-
-/*global apiAudioGetUrl*/
-
-class TextToSpeechAudio {
- constructor(text, voice) {
- this.text = text;
- this.voice = voice;
- this._utterance = null;
- this._volume = 1;
- }
-
- get currentTime() {
- return 0;
- }
- set currentTime(value) {
- // NOP
- }
-
- get volume() {
- return this._volume;
- }
- set volume(value) {
- this._volume = value;
- if (this._utterance !== null) {
- this._utterance.volume = value;
- }
- }
-
- play() {
- try {
- if (this._utterance === null) {
- this._utterance = new SpeechSynthesisUtterance(this.text || '');
- this._utterance.lang = 'ja-JP';
- this._utterance.volume = this._volume;
- this._utterance.voice = this.voice;
- }
-
- speechSynthesis.cancel();
- speechSynthesis.speak(this._utterance);
- } catch (e) {
- // NOP
- }
- }
-
- pause() {
- try {
- speechSynthesis.cancel();
- } catch (e) {
- // NOP
- }
- }
-
- static createFromUri(ttsUri) {
- const m = /^tts:[^#?]*\?([^#]*)/.exec(ttsUri);
- if (m === null) { return null; }
-
- const searchParameters = new URLSearchParams(m[1]);
- const text = searchParameters.get('text');
- let voice = searchParameters.get('voice');
- if (text === null || voice === null) { return null; }
-
- voice = audioGetTextToSpeechVoice(voice);
- if (voice === null) { return null; }
-
- return new TextToSpeechAudio(text, voice);
- }
-}
-
-function audioGetFromUrl(url, willDownload) {
- const tts = TextToSpeechAudio.createFromUri(url);
- if (tts !== null) {
- if (willDownload) {
- throw new Error('AnkiConnect does not support downloading text-to-speech audio.');
- }
- return Promise.resolve(tts);
- }
-
- return new Promise((resolve, reject) => {
- const audio = new Audio(url);
- audio.addEventListener('loadeddata', () => {
- if (audio.duration === 5.694694 || audio.duration === 5.720718) {
- // Hardcoded values for invalid audio
- reject(new Error('Could not retrieve audio'));
- } else {
- resolve(audio);
- }
- });
- audio.addEventListener('error', () => reject(audio.error));
- });
-}
-
-async function audioGetFromSources(expression, sources, optionsContext, willDownload, cache=null) {
- const key = `${expression.expression}:${expression.reading}`;
- if (cache !== null) {
- const cacheValue = cache.get(expression);
- if (typeof cacheValue !== 'undefined') {
- return cacheValue;
- }
- }
-
- for (let i = 0, ii = sources.length; i < ii; ++i) {
- const source = sources[i];
- const url = await apiAudioGetUrl(expression, source, optionsContext);
- if (url === null) {
- continue;
- }
-
- try {
- let audio = await audioGetFromUrl(url, willDownload);
- if (willDownload) {
- // AnkiConnect handles downloading URLs into cards
- audio = null;
- }
- const result = {audio, url, source};
- if (cache !== null) {
- cache.set(key, result);
- }
- return result;
- } catch (e) {
- // NOP
- }
- }
- return {audio: null, url: null, source: null};
-}
-
-function audioGetTextToSpeechVoice(voiceURI) {
- try {
- for (const voice of speechSynthesis.getVoices()) {
- if (voice.voiceURI === voiceURI) {
- return voice;
- }
- }
- } catch (e) {
- // NOP
- }
- return null;
-}
-
-function audioPrepareTextToSpeech(options) {
- if (
- audioPrepareTextToSpeech.state ||
- !options.audio.textToSpeechVoice ||
- !(
- options.audio.sources.includes('text-to-speech') ||
- options.audio.sources.includes('text-to-speech-reading')
- )
- ) {
- // Text-to-speech not in use.
- return;
- }
-
- // Chrome needs this value called once before it will become populated.
- // The first call will return an empty list.
- audioPrepareTextToSpeech.state = true;
- try {
- speechSynthesis.getVoices();
- } catch (e) {
- // NOP
- }
-}
-audioPrepareTextToSpeech.state = false;
diff --git a/ext/mixed/js/core.js b/ext/mixed/js/core.js
index 83813796..0d50e915 100644
--- a/ext/mixed/js/core.js
+++ b/ext/mixed/js/core.js
@@ -175,21 +175,6 @@ function promiseTimeout(delay, resolveValue) {
return promise;
}
-function stringReplaceAsync(str, regex, replacer) {
- let match;
- let index = 0;
- const parts = [];
- while ((match = regex.exec(str)) !== null) {
- parts.push(str.substring(index, match.index), replacer(...match, match.index, str));
- index = regex.lastIndex;
- }
- if (parts.length === 0) {
- return Promise.resolve(str);
- }
- parts.push(str.substring(index));
- return Promise.all(parts).then((v) => v.join(''));
-}
-
/*
* Common events
@@ -269,7 +254,11 @@ const yomichan = (() => {
constructor() {
super();
+ this._isBackendPreparedResolve = null;
+ this._isBackendPreparedPromise = new Promise((resolve) => (this._isBackendPreparedResolve = resolve));
+
this._messageHandlers = new Map([
+ ['backendPrepared', this._onBackendPrepared.bind(this)],
['getUrl', this._onMessageGetUrl.bind(this)],
['optionsUpdated', this._onMessageOptionsUpdated.bind(this)],
['zoomChanged', this._onMessageZoomChanged.bind(this)]
@@ -280,6 +269,11 @@ const yomichan = (() => {
// Public
+ prepare() {
+ chrome.runtime.sendMessage({action: 'yomichanCoreReady'});
+ return this._isBackendPreparedPromise;
+ }
+
generateId(length) {
const array = new Uint8Array(length);
window.crypto.getRandomValues(array);
@@ -305,6 +299,10 @@ const yomichan = (() => {
return false;
}
+ _onBackendPrepared() {
+ this._isBackendPreparedResolve();
+ }
+
_onMessageGetUrl() {
return {url: window.location.href};
}
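yomichan.prepare() turns frontend startup into an explicit handshake: it sends a 'yomichanCoreReady' message to the background page and returns a promise that resolves only once the 'backendPrepared' message comes back, so callers can await it before issuing other API calls (Display.prepare() below does exactly this). A sketch of the caller side, with main() as a hypothetical entry point:

    // Hypothetical frontend entry point.
    async function main() {
        // Resolves once the background page has broadcast 'backendPrepared'.
        await yomichan.prepare();
        // From here on, api* calls can assume the backend is ready.
    }
    main();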
diff --git a/ext/mixed/js/display-generator.js b/ext/mixed/js/display-generator.js
index d7e77cc0..49afc44b 100644
--- a/ext/mixed/js/display-generator.js
+++ b/ext/mixed/js/display-generator.js
@@ -16,7 +16,10 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-/*global apiGetDisplayTemplatesHtml, TemplateHandler*/
+/* global
+ * TemplateHandler
+ * apiGetDisplayTemplatesHtml
+ */
class DisplayGenerator {
constructor() {
@@ -298,7 +301,7 @@ class DisplayGenerator {
}
static _isCharacterKanji(c) {
- const code = c.charCodeAt(0);
+ const code = c.codePointAt(0);
return (
code >= 0x4e00 && code < 0x9fb0 ||
code >= 0x3400 && code < 0x4dc0
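For the BMP ranges tested here, codePointAt(0) and charCodeAt(0) agree; the difference appears for characters encoded as surrogate pairs, where charCodeAt(0) returns only the leading surrogate. A quick illustration with a kanji outside the BMP, chosen purely as an example:

    const c = '𠮷';                  // U+20BB7, stored as a surrogate pair
    c.charCodeAt(0).toString(16);    // 'd842'  (high surrogate only)
    c.codePointAt(0).toString(16);   // '20bb7' (full code point)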
diff --git a/ext/mixed/js/display.js b/ext/mixed/js/display.js
index 5d3076ee..515e28a7 100644
--- a/ext/mixed/js/display.js
+++ b/ext/mixed/js/display.js
@@ -16,11 +16,24 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
-/*global docRangeFromPoint, docSentenceExtract
-apiKanjiFind, apiTermsFind, apiNoteView, apiOptionsGet, apiDefinitionsAddable, apiDefinitionAdd
-apiScreenshotGet, apiForward
-audioPrepareTextToSpeech, audioGetFromSources
-DisplayGenerator, WindowScroll, DisplayContext, DOM*/
+/* global
+ * AudioSystem
+ * DOM
+ * DisplayContext
+ * DisplayGenerator
+ * WindowScroll
+ * apiAudioGetUri
+ * apiDefinitionAdd
+ * apiDefinitionsAddable
+ * apiForward
+ * apiKanjiFind
+ * apiNoteView
+ * apiOptionsGet
+ * apiScreenshotGet
+ * apiTermsFind
+ * docRangeFromPoint
+ * docSentenceExtract
+ */
class Display {
constructor(spinner, container) {
@@ -32,7 +45,7 @@ class Display {
this.index = 0;
this.audioPlaying = null;
this.audioFallback = null;
- this.audioCache = new Map();
+ this.audioSystem = new AudioSystem({getAudioUri: this._getAudioUri.bind(this)});
this.styleNode = null;
this.eventListeners = new EventListenerCollection();
@@ -45,10 +58,115 @@ class Display {
this.displayGenerator = new DisplayGenerator();
this.windowScroll = new WindowScroll();
+ this._onKeyDownHandlers = new Map([
+ ['Escape', () => {
+ this.onSearchClear();
+ return true;
+ }],
+ ['PageUp', (e) => {
+ if (e.altKey) {
+ this.entryScrollIntoView(this.index - 3, null, true);
+ return true;
+ }
+ return false;
+ }],
+ ['PageDown', (e) => {
+ if (e.altKey) {
+ this.entryScrollIntoView(this.index + 3, null, true);
+ return true;
+ }
+ return false;
+ }],
+ ['End', (e) => {
+ if (e.altKey) {
+ this.entryScrollIntoView(this.definitions.length - 1, null, true);
+ return true;
+ }
+ return false;
+ }],
+ ['Home', (e) => {
+ if (e.altKey) {
+ this.entryScrollIntoView(0, null, true);
+ return true;
+ }
+ return false;
+ }],
+ ['ArrowUp', (e) => {
+ if (e.altKey) {
+ this.entryScrollIntoView(this.index - 1, null, true);
+ return true;
+ }
+ return false;
+ }],
+ ['ArrowDown', (e) => {
+ if (e.altKey) {
+ this.entryScrollIntoView(this.index + 1, null, true);
+ return true;
+ }
+ return false;
+ }],
+ ['B', (e) => {
+ if (e.altKey) {
+ this.sourceTermView();
+ return true;
+ }
+ return false;
+ }],
+ ['F', (e) => {
+ if (e.altKey) {
+ this.nextTermView();
+ return true;
+ }
+ return false;
+ }],
+ ['E', (e) => {
+ if (e.altKey) {
+ this.noteTryAdd('term-kanji');
+ return true;
+ }
+ return false;
+ }],
+ ['K', (e) => {
+ if (e.altKey) {
+ this.noteTryAdd('kanji');
+ return true;
+ }
+ return false;
+ }],
+ ['R', (e) => {
+ if (e.altKey) {
+ this.noteTryAdd('term-kana');
+ return true;
+ }
+ return false;
+ }],
+ ['P', (e) => {
+ if (e.altKey) {
+ const index = this.index;
+ if (index < 0 || index >= this.definitions.length) { return; }
+
+ const entry = this.getEntry(index);
+ if (entry !== null && entry.dataset.type === 'term') {
+ this.audioPlay(this.definitions[index], this.firstExpressionIndex, index);
+ }
+ return true;
+ }
+ return false;
+ }],
+ ['V', (e) => {
+ if (e.altKey) {
+ this.noteTryView();
+ return true;
+ }
+ return false;
+ }]
+ ]);
+
this.setInteractive(true);
}
async prepare(options=null) {
+ await yomichan.prepare();
const displayGeneratorPromise = this.displayGenerator.prepare();
const updateOptionsPromise = this.updateOptions(options);
await Promise.all([displayGeneratorPromise, updateOptionsPromise]);
@@ -215,9 +333,9 @@ class Display {
onKeyDown(e) {
const key = Display.getKeyFromEvent(e);
- const handler = Display._onKeyDownHandlers.get(key);
+ const handler = this._onKeyDownHandlers.get(key);
if (typeof handler === 'function') {
- if (handler(this, e)) {
+ if (handler(e)) {
e.preventDefault();
return true;
}
@@ -259,13 +377,12 @@ class Display {
this.updateDocumentOptions(this.options);
this.updateTheme(this.options.general.popupTheme);
this.setCustomCss(this.options.general.customPopupCss);
- audioPrepareTextToSpeech(this.options);
}
updateDocumentOptions(options) {
const data = document.documentElement.dataset;
data.ankiEnabled = `${options.anki.enable}`;
- data.audioEnabled = `${options.audio.enable}`;
+ data.audioEnabled = `${options.audio.enabled}`;
data.compactGlossaries = `${options.general.compactGlossaries}`;
data.enableSearchTags = `${options.scanning.enableSearchTags}`;
data.debug = `${options.general.debugInfo}`;
@@ -520,15 +637,13 @@ class Display {
updateAdderButtons(states) {
for (let i = 0; i < states.length; ++i) {
- const state = states[i];
let noteId = null;
- for (const mode in state) {
+ for (const [mode, info] of Object.entries(states[i])) {
const button = this.adderButtonFind(i, mode);
if (button === null) {
continue;
}
- const info = state[mode];
if (!info.canAdd && noteId === null && info.noteId) {
noteId = info.noteId;
}
@@ -635,7 +750,7 @@ class Display {
this.setSpinnerVisible(true);
const context = {};
- if (this.noteUsesScreenshot()) {
+ if (this.noteUsesScreenshot(mode)) {
const screenshot = await this.getScreenshot();
if (screenshot) {
context.screenshot = screenshot;
@@ -672,16 +787,16 @@ class Display {
}
const sources = this.options.audio.sources;
- let {audio, source} = await audioGetFromSources(expression, sources, this.getOptionsContext(), false, this.audioCache);
- let info;
- if (audio === null) {
+ let audio, source, info;
+ try {
+ ({audio, source} = await this.audioSystem.getDefinitionAudio(expression, sources));
+ info = `From source ${1 + sources.indexOf(source)}: ${source}`;
+ } catch (e) {
if (this.audioFallback === null) {
this.audioFallback = new Audio('/mixed/mp3/button.mp3');
}
audio = this.audioFallback;
info = 'Could not find audio';
- } else {
- info = `From source ${1 + sources.indexOf(source)}: ${source}`;
}
const button = this.audioButtonFindImage(entryIndex);
@@ -705,10 +820,11 @@ class Display {
}
}
- noteUsesScreenshot() {
- const fields = this.options.anki.terms.fields;
- for (const name in fields) {
- if (fields[name].includes('{screenshot}')) {
+ noteUsesScreenshot(mode) {
+ const optionsAnki = this.options.anki;
+ const fields = (mode === 'kanji' ? optionsAnki.kanji : optionsAnki.terms).fields;
+ for (const fieldValue of Object.values(fields)) {
+ if (fieldValue.includes('{screenshot}')) {
return true;
}
}
@@ -814,121 +930,9 @@ class Display {
const key = event.key;
return (typeof key === 'string' ? (key.length === 1 ? key.toUpperCase() : key) : '');
}
-}
-
-Display._onKeyDownHandlers = new Map([
- ['Escape', (self) => {
- self.onSearchClear();
- return true;
- }],
-
- ['PageUp', (self, e) => {
- if (e.altKey) {
- self.entryScrollIntoView(self.index - 3, null, true);
- return true;
- }
- return false;
- }],
-
- ['PageDown', (self, e) => {
- if (e.altKey) {
- self.entryScrollIntoView(self.index + 3, null, true);
- return true;
- }
- return false;
- }],
-
- ['End', (self, e) => {
- if (e.altKey) {
- self.entryScrollIntoView(self.definitions.length - 1, null, true);
- return true;
- }
- return false;
- }],
-
- ['Home', (self, e) => {
- if (e.altKey) {
- self.entryScrollIntoView(0, null, true);
- return true;
- }
- return false;
- }],
- ['ArrowUp', (self, e) => {
- if (e.altKey) {
- self.entryScrollIntoView(self.index - 1, null, true);
- return true;
- }
- return false;
- }],
-
- ['ArrowDown', (self, e) => {
- if (e.altKey) {
- self.entryScrollIntoView(self.index + 1, null, true);
- return true;
- }
- return false;
- }],
-
- ['B', (self, e) => {
- if (e.altKey) {
- self.sourceTermView();
- return true;
- }
- return false;
- }],
-
- ['F', (self, e) => {
- if (e.altKey) {
- self.nextTermView();
- return true;
- }
- return false;
- }],
-
- ['E', (self, e) => {
- if (e.altKey) {
- self.noteTryAdd('term-kanji');
- return true;
- }
- return false;
- }],
-
- ['K', (self, e) => {
- if (e.altKey) {
- self.noteTryAdd('kanji');
- return true;
- }
- return false;
- }],
-
- ['R', (self, e) => {
- if (e.altKey) {
- self.noteTryAdd('term-kana');
- return true;
- }
- return false;
- }],
-
- ['P', (self, e) => {
- if (e.altKey) {
- const index = self.index;
- if (index < 0 || index >= self.definitions.length) { return; }
-
- const entry = self.getEntry(index);
- if (entry !== null && entry.dataset.type === 'term') {
- self.audioPlay(self.definitions[index], self.firstExpressionIndex, index);
- }
- return true;
- }
- return false;
- }],
-
- ['V', (self, e) => {
- if (e.altKey) {
- self.noteTryView();
- return true;
- }
- return false;
- }]
-]);
+ async _getAudioUri(definition, source) {
+ const optionsContext = this.getOptionsContext();
+ return await apiAudioGetUri(definition, source, optionsContext);
+ }
+}
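Moving the key handler table from the static Display._onKeyDownHandlers map of (self, e) callbacks to an instance map of (e) closures lets each handler capture this directly and keeps the bindings per-instance. Adding another binding follows the same shape; the Alt+G entry below is purely hypothetical and not part of this change:

    // Hypothetical extra binding, registered after construction.
    this._onKeyDownHandlers.set('G', (e) => {
        if (e.altKey) {
            this.noteTryAdd('term-kanji');  // reuse an existing action
            return true;
        }
        return false;
    });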
diff --git a/ext/mixed/js/scroll.js b/ext/mixed/js/scroll.js
index 5829d294..72da8b65 100644
--- a/ext/mixed/js/scroll.js
+++ b/ext/mixed/js/scroll.js
@@ -26,7 +26,7 @@ class WindowScroll {
this.animationEndTime = 0;
this.animationEndX = 0;
this.animationEndY = 0;
- this.requestAnimationFrameCallback = (t) => this.onAnimationFrame(t);
+ this.requestAnimationFrameCallback = this.onAnimationFrame.bind(this);
}
toY(y) {
diff --git a/ext/mixed/js/text-scanner.js b/ext/mixed/js/text-scanner.js
index ff0eac8b..a08e09fb 100644
--- a/ext/mixed/js/text-scanner.js
+++ b/ext/mixed/js/text-scanner.js
@@ -16,7 +16,11 @@
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
-/*global docRangeFromPoint, TextSourceRange, DOM*/
+/* global
+ * DOM
+ * TextSourceRange
+ * docRangeFromPoint
+ */
class TextScanner {
constructor(node, ignoreNodes, ignoreElements, ignorePoints) {