diff --git a/mycroft_classic_listener/__main__.py b/mycroft_classic_listener/__main__.py
index b80dfa7..a790454 100644
--- a/mycroft_classic_listener/__main__.py
+++ b/mycroft_classic_listener/__main__.py
@@ -12,194 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-from threading import Lock
-from ovos_backend_client.identity import IdentityManager
-from ovos_bus_client.message import Message
-from ovos_config import Configuration
-from ovos_utils import (
-    create_daemon,
-    wait_for_exit_signal
-)
-from ovos_utils.enclosure.api import EnclosureAPI
-from ovos_utils.log import LOG
 from ovos_bus_client.client import MessageBusClient
-from ovos_utils.process_utils import ProcessStatus, StatusCallbackMap
+from ovos_utils import wait_for_exit_signal
 
-from mycroft_classic_listener.listener import RecognizerLoop
-
-bus = None  # Mycroft messagebus connection
-lock = Lock()
-loop = None
-config = None
-
-
-def handle_record_begin():
-    """Forward internal bus message to external bus."""
-    LOG.info("Begin Recording...")
-    context = {'client_name': 'mycroft_listener',
-               'source': 'audio'}
-    bus.emit(Message('recognizer_loop:record_begin', context=context))
-
-
-def handle_record_end():
-    """Forward internal bus message to external bus."""
-    LOG.info("End Recording...")
-    context = {'client_name': 'mycroft_listener',
-               'source': 'audio'}
-    bus.emit(Message('recognizer_loop:record_end', context=context))
-
-
-def handle_no_internet():
-    LOG.debug("Notifying enclosure of no internet connection")
-    context = {'client_name': 'mycroft_listener',
-               'source': 'audio'}
-    bus.emit(Message('enclosure.notify.no_internet', context=context))
-
-
-def handle_awoken():
-    """Forward mycroft.awoken to the messagebus."""
-    LOG.info("Listener is now Awake: ")
-    context = {'client_name': 'mycroft_listener',
-               'source': 'audio'}
-    bus.emit(Message('mycroft.awoken', context=context))
-
-
-def handle_wakeword(event):
-    LOG.info("Wakeword Detected: " + event['utterance'])
-    bus.emit(Message('recognizer_loop:wakeword', event))
-
-
-def handle_utterance(event):
-    LOG.info("Utterance: " + str(event['utterances']))
-    context = {'client_name': 'mycroft_listener',
-               'source': 'audio',
-               'destination': ["skills"]}
-    if 'ident' in event:
-        ident = event.pop('ident')
-        context['ident'] = ident
-    bus.emit(Message('recognizer_loop:utterance', event, context))
-
-
-def handle_unknown():
-    context = {'client_name': 'mycroft_listener',
-               'source': 'audio'}
-    bus.emit(Message('mycroft.speech.recognition.unknown', context=context))
-
-
-def handle_speak(event):
-    """
-    Forward speak message to message bus.
-    """
-    context = {'client_name': 'mycroft_listener',
-               'source': 'audio'}
-    bus.emit(Message('speak', event, context))
-
-
-def handle_sleep(event):
-    """Put the recognizer loop to sleep."""
-    loop.sleep()
-
-
-def handle_wake_up(event):
-    """Wake up the the recognize loop."""
-    loop.awaken()
-
-
-def handle_mic_mute(event):
-    """Mute the listener system."""
-    loop.mute()
-
-
-def handle_mic_unmute(event):
-    """Unmute the listener system."""
-    loop.unmute()
-
-
-def handle_mic_listen(_):
-    """Handler for mycroft.mic.listen.
-
-    Starts listening as if wakeword was spoken.
-    """
-    loop.responsive_recognizer.trigger_listen()
-
-
-def handle_mic_get_status(event):
-    """Query microphone mute status."""
-    data = {'muted': loop.is_muted()}
-    bus.emit(event.response(data))
-
-
-def handle_paired(event):
-    """Update identity information with pairing data.
-
-    This is done here to make sure it's only done in a single place.
-    TODO: Is there a reason this isn't done directly in the pairing skill?
-    """
-    IdentityManager.update(event.data)
-
-
-def handle_audio_start(event):
-    """Mute recognizer loop."""
-    if config.get("listener").get("mute_during_output"):
-        loop.mute()
-
-
-def handle_audio_end(event):
-    """Request unmute, if more sources have requested the mic to be muted
-    it will remain muted.
-    """
-    if config.get("listener").get("mute_during_output"):
-        loop.unmute()  # restore
-
-
-def handle_stop(event):
-    """Handler for mycroft.stop, i.e. button press."""
-    loop.force_unmute()
-
-
-def handle_open():
-    # TODO: Move this into the Enclosure (not speech client)
-    # Reset the UI to indicate ready for speech processing
-    EnclosureAPI(bus).reset()
-
-
-def on_ready():
-    LOG.info('Speech client is ready.')
-
-
-def on_stopping():
-    LOG.info('Speech service is shutting down...')
-
-
-def on_error(e='Unknown'):
-    LOG.error('Audio service failed to launch ({}).'.format(repr(e)))
-
-
-def connect_loop_events(loop):
-    loop.on('recognizer_loop:utterance', handle_utterance)
-    loop.on('recognizer_loop:speech.recognition.unknown', handle_unknown)
-    loop.on('speak', handle_speak)
-    loop.on('recognizer_loop:record_begin', handle_record_begin)
-    loop.on('recognizer_loop:awoken', handle_awoken)
-    loop.on('recognizer_loop:wakeword', handle_wakeword)
-    loop.on('recognizer_loop:record_end', handle_record_end)
-    loop.on('recognizer_loop:no_internet', handle_no_internet)
-
-
-def connect_bus_events(bus):
-    # Register handlers for events on main Mycroft messagebus
-    bus.on('open', handle_open)
-    bus.on('recognizer_loop:sleep', handle_sleep)
-    bus.on('recognizer_loop:wake_up', handle_wake_up)
-    bus.on('mycroft.mic.mute', handle_mic_mute)
-    bus.on('mycroft.mic.unmute', handle_mic_unmute)
-    bus.on('mycroft.mic.get_status', handle_mic_get_status)
-    bus.on('mycroft.mic.listen', handle_mic_listen)
-    bus.on("mycroft.paired", handle_paired)
-    bus.on('recognizer_loop:audio_output_start', handle_audio_start)
-    bus.on('recognizer_loop:audio_output_end', handle_audio_end)
-    bus.on('mycroft.stop', handle_stop)
+from mycroft_classic_listener.service import ClassicListener, on_error, on_stopping, on_ready
 
 
 def main(ready_hook=on_ready, error_hook=on_error, stopping_hook=on_stopping,
@@ -208,25 +25,19 @@ def main(ready_hook=on_ready, error_hook=on_error, stopping_hook=on_stopping,
     global loop
     global config
     try:
-        config = Configuration()
         bus = MessageBusClient()
         bus.run_in_thread()
-        connect_bus_events(bus)
-        callbacks = StatusCallbackMap(on_ready=ready_hook, on_error=error_hook,
-                                      on_stopping=stopping_hook)
-        status = ProcessStatus('speech', bus, callbacks)
+        service = ClassicListener(bus, ready_hook,
+                                  error_hook,
+                                  stopping_hook,
+                                  watchdog)
+        service.daemon = True
+        service.start()
 
-        # Register handlers on internal RecognizerLoop bus
-        loop = RecognizerLoop(watchdog)
-        connect_loop_events(loop)
-        create_daemon(loop.run)
-        status.set_started()
     except Exception as e:
         error_hook(e)
     else:
-        status.set_ready()
         wait_for_exit_signal()
-        status.set_stopping()
         bus.close()
 
 
diff --git a/mycroft_classic_listener/mic.py b/mycroft_classic_listener/mic.py
index 89b4655..e9f83ba 100644
--- a/mycroft_classic_listener/mic.py
+++ b/mycroft_classic_listener/mic.py
@@ -329,6 +329,7 @@ def __init__(self, wake_word_recognizer, watchdog=None):
         self.wake_word_name = wake_word_recognizer.key_phrase
 
         self.overflow_exc = listener_config.get('overflow_exception', False)
+        self.signals_enabled = listener_config.get("enabled_signals", False)
         super().__init__()
         self.wake_word_recognizer = wake_word_recognizer
 
@@ -348,7 +349,7 @@ def __init__(self, wake_word_recognizer, watchdog=None):
         if self.save_utterances and not isdir(self.saved_utterances_dir):
             os.mkdir(self.saved_utterances_dir)
 
-        self.mic_level_file = os.path.join(get_ipc_directory(), "mic_level")
+        self.mic_level_file = os.path.join(get_ipc_directory(config=self.config), "mic_level")
 
         # Signal statuses
         self._stop_signaled = False
@@ -480,18 +481,18 @@ def _skip_wake_word(self):
         if self._listen_triggered:
             return True
 
-        # Pressing the Mark 1 button can start recording (unless
-        # it is being used to mean 'stop' instead)
-        if check_for_signal('buttonPress', 1):
-            # give other processes time to consume this signal if
-            # it was meant to be a 'stop'
-            sleep(0.25)
-            if check_for_signal('buttonPress'):
-                # Signal is still here, assume it was intended to
-                # begin recording
-                LOG.debug("Button Pressed, wakeword not needed")
-                return True
-
+        if self.signals_enabled:
+            # Pressing the Mark 1 button can start recording (unless
+            # it is being used to mean 'stop' instead)
+            if check_for_signal('buttonPress', 1):
+                # give other processes time to consume this signal if
+                # it was meant to be a 'stop'
+                sleep(0.25)
+                if check_for_signal('buttonPress'):
+                    # Signal is still here, assume it was intended to
+                    # begin recording
+                    LOG.debug("Button Pressed, wakeword not needed")
+                    return True
         return False
 
     def stop(self):
diff --git a/mycroft_classic_listener/service.py b/mycroft_classic_listener/service.py
new file mode 100644
index 0000000..d77a851
--- /dev/null
+++ b/mycroft_classic_listener/service.py
@@ -0,0 +1,255 @@
+# Copyright 2017 Mycroft AI Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from threading import Lock
+from threading import Thread
+
+from ovos_backend_client.identity import IdentityManager
+from ovos_bus_client.client import MessageBusClient
+from ovos_bus_client.message import Message
+from ovos_config import Configuration
+from ovos_utils import (
+    create_daemon,
+    wait_for_exit_signal
+)
+from ovos_utils.enclosure.api import EnclosureAPI
+from ovos_utils.log import LOG
+from ovos_utils.process_utils import ProcessStatus, StatusCallbackMap
+
+from mycroft_classic_listener.listener import RecognizerLoop
+
+bus = None  # Mycroft messagebus connection
+lock = Lock()
+loop = None
+config = Configuration()
+
+
+def handle_record_begin():
+    """Forward internal bus message to external bus."""
+    LOG.info("Begin Recording...")
+    context = {'client_name': 'mycroft_listener',
+               'source': 'audio'}
+    bus.emit(Message('recognizer_loop:record_begin', context=context))
+
+
+def handle_record_end():
+    """Forward internal bus message to external bus."""
+    LOG.info("End Recording...")
+    context = {'client_name': 'mycroft_listener',
+               'source': 'audio'}
+    bus.emit(Message('recognizer_loop:record_end', context=context))
+
+
+def handle_no_internet():
+    LOG.debug("Notifying enclosure of no internet connection")
+    context = {'client_name': 'mycroft_listener',
+               'source': 'audio'}
+    bus.emit(Message('enclosure.notify.no_internet', context=context))
+
+
+def handle_awoken():
+    """Forward mycroft.awoken to the messagebus."""
+    LOG.info("Listener is now Awake: ")
+    context = {'client_name': 'mycroft_listener',
+               'source': 'audio'}
+    bus.emit(Message('mycroft.awoken', context=context))
+
+
+def handle_wakeword(event):
+    LOG.info("Wakeword Detected: " + event['utterance'])
+    bus.emit(Message('recognizer_loop:wakeword', event))
+
+
+def handle_utterance(event):
+    LOG.info("Utterance: " + str(event['utterances']))
+    context = {'client_name': 'mycroft_listener',
+               'source': 'audio',
+               'destination': ["skills"]}
+    if 'ident' in event:
+        ident = event.pop('ident')
+        context['ident'] = ident
+    bus.emit(Message('recognizer_loop:utterance', event, context))
+
+
+def handle_unknown():
+    context = {'client_name': 'mycroft_listener',
+               'source': 'audio'}
+    bus.emit(Message('mycroft.speech.recognition.unknown', context=context))
+
+
+def handle_speak(event):
+    """
+    Forward speak message to message bus.
+    """
+    context = {'client_name': 'mycroft_listener',
+               'source': 'audio'}
+    bus.emit(Message('speak', event, context))
+
+
+def handle_sleep(event):
+    """Put the recognizer loop to sleep."""
+    loop.sleep()
+
+
+def handle_wake_up(event):
+    """Wake up the recognizer loop."""
+    loop.awaken()
+
+
+def handle_mic_mute(event):
+    """Mute the listener system."""
+    loop.mute()
+
+
+def handle_mic_unmute(event):
+    """Unmute the listener system."""
+    loop.unmute()
+
+
+def handle_mic_listen(_):
+    """Handler for mycroft.mic.listen.
+
+    Starts listening as if wakeword was spoken.
+    """
+    loop.responsive_recognizer.trigger_listen()
+
+
+def handle_mic_get_status(event):
+    """Query microphone mute status."""
+    data = {'muted': loop.is_muted()}
+    bus.emit(event.response(data))
+
+
+def handle_paired(event):
+    """Update identity information with pairing data.
+
+    This is done here to make sure it's only done in a single place.
+    TODO: Is there a reason this isn't done directly in the pairing skill?
+    """
+    IdentityManager.update(event.data)
+
+
+def handle_audio_start(event):
+    """Mute recognizer loop."""
+    if config.get("listener").get("mute_during_output"):
+        loop.mute()
+
+
+def handle_audio_end(event):
+    """Request unmute, if more sources have requested the mic to be muted
+    it will remain muted.
+    """
+    if config.get("listener").get("mute_during_output"):
+        loop.unmute()  # restore
+
+
+def handle_stop(event):
+    """Handler for mycroft.stop, i.e. button press."""
+    loop.force_unmute()
+
+
+def handle_open():
+    # TODO: Move this into the Enclosure (not speech client)
+    # Reset the UI to indicate ready for speech processing
+    EnclosureAPI(bus).reset()
+
+
+def on_ready():
+    LOG.info('Speech client is ready.')
+
+
+def on_stopping():
+    LOG.info('Speech service is shutting down...')
+
+
+def on_error(e='Unknown'):
+    LOG.error('Audio service failed to launch ({}).'.format(repr(e)))
+
+
+def connect_loop_events(loop):
+    loop.on('recognizer_loop:utterance', handle_utterance)
+    loop.on('recognizer_loop:speech.recognition.unknown', handle_unknown)
+    loop.on('speak', handle_speak)
+    loop.on('recognizer_loop:record_begin', handle_record_begin)
+    loop.on('recognizer_loop:awoken', handle_awoken)
+    loop.on('recognizer_loop:wakeword', handle_wakeword)
+    loop.on('recognizer_loop:record_end', handle_record_end)
+    loop.on('recognizer_loop:no_internet', handle_no_internet)
+
+
+def connect_bus_events(bus):
+    # Register handlers for events on main Mycroft messagebus
+    bus.on('open', handle_open)
+    bus.on('recognizer_loop:sleep', handle_sleep)
+    bus.on('recognizer_loop:wake_up', handle_wake_up)
+    bus.on('mycroft.mic.mute', handle_mic_mute)
+    bus.on('mycroft.mic.unmute', handle_mic_unmute)
+    bus.on('mycroft.mic.get_status', handle_mic_get_status)
+    bus.on('mycroft.mic.listen', handle_mic_listen)
+    bus.on("mycroft.paired", handle_paired)
+    bus.on('recognizer_loop:audio_output_start', handle_audio_start)
+    bus.on('recognizer_loop:audio_output_end', handle_audio_end)
+    bus.on('mycroft.stop', handle_stop)
+
+
+class ClassicListener(Thread):
+
+    def __init__(self, new_bus, ready_hook=on_ready,
+                 error_hook=on_error, stopping_hook=on_stopping,
+                 watchdog=lambda: None):
+        super().__init__()
+        global bus, loop
+        bus = new_bus
+        connect_bus_events(bus)
+        callbacks = StatusCallbackMap(on_ready=ready_hook, on_error=error_hook,
+                                      on_stopping=stopping_hook)
+        self.status = ProcessStatus('speech', bus, callbacks)
+
+        # Register handlers on internal RecognizerLoop bus
+        loop = RecognizerLoop(watchdog)
+        connect_loop_events(loop)
+
+        self.status.set_started()
+
+    def run(self) -> None:
+        global loop
+        self.status.set_ready()
+        loop.run()
+        self.status.set_stopping()
+
+
+def main(ready_hook=on_ready, error_hook=on_error, stopping_hook=on_stopping,
+         watchdog=lambda: None):
+    global bus
+    global loop
+    global config
+    try:
+        bus = MessageBusClient()
+        bus.run_in_thread()
+        service = ClassicListener(bus, ready_hook,
+                                  error_hook,
+                                  stopping_hook,
+                                  watchdog)
+        service.daemon = True
+        service.start()
+
+    except Exception as e:
+        error_hook(e)
+    else:
+        wait_for_exit_signal()
+        bus.close()
+
+
+if __name__ == "__main__":
+    main()
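Usage sketch (not part of the patch): a minimal, hedged example of how the new ClassicListener thread can be embedded in another process. It simply mirrors main() in mycroft_classic_listener/service.py and assumes the packages imported by the patch are installed.

    # Sketch mirroring mycroft_classic_listener/service.py:main()
    from ovos_bus_client.client import MessageBusClient
    from ovos_utils import wait_for_exit_signal

    from mycroft_classic_listener.service import ClassicListener

    bus = MessageBusClient()
    bus.run_in_thread()              # connect to the OVOS messagebus in a background thread

    service = ClassicListener(bus)   # hooks default to on_ready/on_error/on_stopping
    service.daemon = True            # daemon thread, so it will not block process exit
    service.start()                  # run() marks the status ready and enters RecognizerLoop.run()

    wait_for_exit_signal()           # block until an exit signal is received
    bus.close()

    # Note: the Mark 1 buttonPress path kept in mic.py stays disabled unless the
    # listener configuration sets the "enabled_signals" flag to true.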