Use callbacks for script injection
Committed c34913
Stop explicitly tracking microphone state during voice dictation.
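
The pattern this adopts: injectScript now takes an optional onload callback, so code that depends on an injected script runs only after the browser has finished loading it, rather than relying on tag.async = false to force in-order execution. A minimal sketch of the intended usage (the asset URL and the window.ExampleLib global are placeholders; the real call sites in the diff below wire up WebTorrent, DragDrop, Vosk and the mediasoup chat scripts):

    const injectScript = (src, onload) => {
      const tag = document.createElement('script')
      tag.src = src
      tag.type = 'text/javascript'
      // Attach the handler only when a callback was supplied.
      if (typeof onload !== 'undefined') { tag.onload = onload }
      document.getElementsByTagName('head')[0].appendChild(tag)
    }

    // Dependent setup runs once the script has loaded, not merely been appended.
    injectScript('/assets/example-lib-1.0.0.js', () => {
      const client = new window.ExampleLib() // placeholder global defined by the injected script
      console.log('example lib ready', client)
    })
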
--- a/canopy/static/enliven.js
+++ b/canopy/static/enliven.js
import { _, go, upgradeLink } from '/static/web.js' // eslint-disable-line
-const injectScript = src => {
+const injectScript = (src, onload) => {
const tag = document.createElement('script')
tag.src = src
tag.type = 'text/javascript'
- tag.async = false
+ if (typeof onload !== 'undefined') { tag.onload = onload }
document.getElementsByTagName('head')[0].appendChild(tag)
}
const injectStylesheet = href => {
}
}, false)
- injectScript('/assets/webtorrent-2.1.30.js')
-
- btclient = new WebTorrent()
-
- injectScript('/assets/drag-drop-7.2.0.js')
+ injectScript('/assets/webtorrent-2.1.30.js', () => {
+ btclient = new WebTorrent()
+ })
- DragDrop('body', (files, pos, fileList, directories) => {
- console.log(files[0])
- btclient.seed(files, torrent => {
- console.log(`Client is seeding at:\n${torrent.magnetURI}`)
+ injectScript('/assets/drag-drop-7.2.0.js', () => {
+ DragDrop('body', (files, pos, fileList, directories) => {
+ console.log('Attempting to seed over WebTorrent:', files[0])
+ btclient.seed(files, torrent => {
+ console.log(`Seeding file over WebTorrent: ${torrent.magnetURI}`)
+ })
})
})
})
document.documentElement.className = mode
}
-const initDictation = async () => {
+const initDictation = () => {
microphoneStatus('downloading')
- injectScript('/assets/vosk-0.0.8.js')
-
- microphoneStatus('loading')
- const partialContainer = document.querySelector('#search .partial')
-
- const channel = new MessageChannel()
- const model = await Vosk.createModel('/static/vosk-model-small-en-us-0.15.tar.gz')
- model.registerPort(channel.port1)
+ injectScript('/assets/vosk-0.0.8.js', async () => {
+ microphoneStatus('loading')
+ const partialContainer = document.querySelector('#search .partial')
- const sampleRate = 48000
+ const channel = new MessageChannel()
+ const model = await Vosk.createModel('/static/vosk-model-small-en-us-0.15.tar.gz')
+ model.registerPort(channel.port1)
- const recognizer = new model.KaldiRecognizer(sampleRate)
- recognizer.setWords(true)
+ const sampleRate = 48000
- const wakeWord = 'ghost'
- const readyWord = 'Say "ghost help"'
- let state = 'asleep'
- microphoneStatus('on')
- partialContainer.innerHTML = readyWord
+ const recognizer = new model.KaldiRecognizer(sampleRate)
+ recognizer.setWords(true)
- recognizer.on('result', message => {
- let input = message.result.text
- if (following && input !== '') {
- let number = input.replace(' ', '')
- if (number === 'for') number = 'four'
- go(followList[number])
- hideFollowLinks()
- state = 'asleep'
- microphoneStatus('on')
- partialContainer.innerHTML = readyWord
- return
- }
- if (input.slice(0, wakeWord.length) !== wakeWord) {
- partialContainer.innerHTML = readyWord
- return
- }
- input = input.slice(wakeWord.length + 1)
+ const wakeWord = 'ghost'
+ const readyWord = 'Say "ghost help"'
microphoneStatus('on')
- if (input.endsWith('cancel cancel')) {
- partialContainer.innerHTML = ''
- return
- }
- if (input === 'help') showGuide() // help
- else if (input.startsWith('query for')) { // query for
- const query = input.slice(10)
- document.querySelector('input[name=q]').value = query
- go(`/search?q=${query}`)
- } else if (input.startsWith('go')) { // go
- switch (input.slice(3)) {
- case 'home': goHome(); break // home
- case 'up': goUp(); break // up
- case 'prev': goPrevious(); break // prev
- case 'next': goNext(); break // next
- case 'back': goBack(); break // back
- case 'forward': goForward(); break // forward
+ partialContainer.innerHTML = readyWord
+
+ recognizer.on('result', message => {
+ let input = message.result.text
+ if (following && input !== '') {
+ let number = input.replace(' ', '')
+ if (number === 'for') number = 'four'
+ go(followList[number])
+ hideFollowLinks()
+ microphoneStatus('on')
+ partialContainer.innerHTML = readyWord
+ return
}
- } else if (input.startsWith('follow')) { // follow
- showVoiceFollowLinks()
- } else if (input.startsWith('tell me')) { // tell me
- const request = input.slice(8)
- fetch('/ai/assistant', {
- method: 'POST',
- headers: {
- 'Content-Type': 'application/json'
- },
- body: JSON.stringify({ request })
- })
- .then(response => response.blob())
- .then(blob => {
- const audio = new Audio(URL.createObjectURL(blob))
- audio.addEventListener('canplaythrough', () => {
- state = 'asleep'
- microphoneStatus('on')
- audio.play()
- // partialContainer.value = readyWord
- })
+ if (input.slice(0, wakeWord.length) !== wakeWord) {
+ partialContainer.innerHTML = readyWord
+ return
+ }
+ input = input.slice(wakeWord.length + 1)
+ microphoneStatus('on')
+ if (input.endsWith('cancel cancel')) {
+ partialContainer.innerHTML = ''
+ return
+ }
+ if (input === 'help') showGuide() // help
+ else if (input.startsWith('query for')) { // query for
+ const query = input.slice(10)
+ document.querySelector('input[name=q]').value = query
+ go(`/search?q=${query}`)
+ } else if (input.startsWith('go')) { // go
+ switch (input.slice(3)) {
+ case 'home': goHome(); break // home
+ case 'up': goUp(); break // up
+ case 'prev': goPrevious(); break // prev
+ case 'next': goNext(); break // next
+ case 'back': goBack(); break // back
+ case 'forward': goForward(); break // forward
+ }
+ } else if (input.startsWith('follow')) { // follow
+ showVoiceFollowLinks()
+ } else if (input.startsWith('tell me')) { // tell me
+ const request = input.slice(8)
+ fetch('/ai/assistant', {
+ method: 'POST',
+ headers: {
+ 'Content-Type': 'application/json'
+ },
+ body: JSON.stringify({ request })
})
- }
- partialContainer.innerHTML = `${input}`
- })
- recognizer.on('partialresult', message => {
- const input = message.result.partial
- if (input.slice(0, wakeWord.length) !== wakeWord) { return }
- state = 'awake'
- microphoneStatus('active')
- partialContainer.innerHTML = input.slice(wakeWord.length)
- })
-
- const mediaStream = await navigator.mediaDevices.getUserMedia({
- video: false,
- audio: {
- echoCancellation: true,
- noiseSuppression: true,
- channelCount: 1,
- sampleRate
- }
- })
+ .then(response => response.blob())
+ .then(blob => {
+ const audio = new Audio(URL.createObjectURL(blob))
+ audio.addEventListener('canplaythrough', () => {
+ microphoneStatus('on')
+ audio.play()
+ // partialContainer.value = readyWord
+ })
+ })
+ }
+ partialContainer.innerHTML = `${input}`
+ })
+ recognizer.on('partialresult', message => {
+ const input = message.result.partial
+ if (input.slice(0, wakeWord.length) !== wakeWord) { return }
+ microphoneStatus('active')
+ partialContainer.innerHTML = input.slice(wakeWord.length)
+ })
- const audioContext = new AudioContext()
- await audioContext.audioWorklet.addModule('/static/recognizer-processor.js')
- const recognizerProcessor = new AudioWorkletNode(
- audioContext,
- 'recognizer-processor',
- { channelCount: 1, numberOfInputs: 1, numberOfOutputs: 1 }
- )
- recognizerProcessor.port.postMessage(
- { action: 'init', recognizerId: recognizer.id },
- [channel.port2]
- )
- recognizerProcessor.connect(audioContext.destination)
+ const mediaStream = await navigator.mediaDevices.getUserMedia({
+ video: false,
+ audio: {
+ echoCancellation: true,
+ noiseSuppression: true,
+ channelCount: 1,
+ sampleRate
+ }
+ })
- const source = audioContext.createMediaStreamSource(mediaStream)
- source.connect(recognizerProcessor)
+ const audioContext = new AudioContext()
+ await audioContext.audioWorklet.addModule('/static/recognizer-processor.js')
+ const recognizerProcessor = new AudioWorkletNode(
+ audioContext,
+ 'recognizer-processor',
+ { channelCount: 1, numberOfInputs: 1, numberOfOutputs: 1 }
+ )
+ recognizerProcessor.port.postMessage(
+ { action: 'init', recognizerId: recognizer.id },
+ [channel.port2]
+ )
+ recognizerProcessor.connect(audioContext.destination)
+
+ const source = audioContext.createMediaStreamSource(mediaStream)
+ source.connect(recognizerProcessor)
+ })
}
const initChat = () => {
join.style.display = 'none'
injectStylesheet('/chats/mediasoup-demo-app.css')
- injectScript('/chats/mediasoup-demo-app.js')
- injectScript('/chats/resources/js/antiglobal.js')
- window.localStorage.setItem('debug', '* -engine* -socket* -RIE* *WARN* *ERROR*')
- if (window.antiglobal) {
- window.antiglobal('___browserSync___oldSocketIo', 'io', '___browserSync___', '__core-js_shared__')
- setInterval(window.antiglobal, 180000)
- }
+ injectScript('/chats/mediasoup-demo-app.js', () => {
+ injectScript('/chats/resources/js/antiglobal.js', () => {
+ window.localStorage.setItem('debug', '* -engine* -socket* -RIE* *WARN* *ERROR*')
+ if (window.antiglobal) {
+ window.antiglobal('___browserSync___oldSocketIo', 'io', '___browserSync___', '__core-js_shared__')
+ setInterval(window.antiglobal, 180000)
+ }
- const autoMute = () => {
- if (typeof window.CLIENT !== 'undefined' && window.CLIENT._micProducer) {
- window.CLIENT.muteMic()
- clearInterval(autoMuter)
- }
- }
- const autoMuter = setInterval(autoMute, 100)
+ const autoMute = () => {
+ if (typeof window.CLIENT !== 'undefined' && window.CLIENT._micProducer) {
+ window.CLIENT.muteMic()
+ clearInterval(autoMuter)
+ }
+ }
+ const autoMuter = setInterval(autoMute, 100)
+ })
+ })
}
const numberToWords = num => {