diff --git a/3_HeyRobot/voice/Hey Robot.pmdl b/3_HeyRobot/voice/Hey Robot.pmdl
new file mode 100644
index 0000000..59e1f26
Binary files /dev/null and b/3_HeyRobot/voice/Hey Robot.pmdl differ
diff --git a/3_HeyRobot/voice/assistant_grpc_demo_snowboy.py b/3_HeyRobot/voice/assistant_grpc_demo_snowboy.py
new file mode 100644
index 0000000..241fd5b
--- /dev/null
+++ b/3_HeyRobot/voice/assistant_grpc_demo_snowboy.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""A demo of the Google Assistant GRPC recognizer."""
+
+import argparse
+import locale
+import logging
+import signal
+import sys
+from light import Light
+
+from aiy.assistant.grpc import AssistantServiceClientWithLed
+from aiy.board import Board
+from gpiozero import LED
+from gpiozero import PWMLED
+
+import mod.snowboydecoder as snowboydecoder
+
+def volume(string):
+    value = int(string)
+    if value < 0 or value > 100:
+        raise argparse.ArgumentTypeError('Volume must be in [0...100] range.')
+    return value
+
+def locale_language():
+    language, _ = locale.getdefaultlocale()
+    return language
+
+interrupted = False
+
+def signal_handler(signal, frame):
+    global interrupted
+    interrupted = True
+
+def interrupt_callback():
+    global interrupted
+    return interrupted
+
+def main():
+    logging.basicConfig(level=logging.DEBUG)
+    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(0))
+
+    parser = argparse.ArgumentParser(description='Assistant service example.')
+    parser.add_argument('--language', default=locale_language())
+    parser.add_argument('--volume', type=volume, default=80)
+    parser.add_argument('--model', default='src/examples/voice/Hey Robot.pmdl')
+    args = parser.parse_args()
+
+    detector = snowboydecoder.HotwordDetector(args.model, sensitivity=0.5)
+    with Board() as board:
+        assistant = AssistantServiceClientWithLed(board=board,
+                                                  volume_percentage=args.volume,
+                                                  language_code=args.language)
+        while True:
+            #green_led = Light(18)
+            #red_led = Light(17)
+            logging.info('Speak own hotword and speak')
+            detector.start()
+            logging.info('Conversation started!')
+            assistant.conversation()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/3_HeyRobot/voice/assistant_library_with_local_commands_demo.py b/3_HeyRobot/voice/assistant_library_with_local_commands_demo.py
new file mode 100644
index 0000000..482757e
--- /dev/null
+++ b/3_HeyRobot/voice/assistant_library_with_local_commands_demo.py
@@ -0,0 +1,90 @@
+#!/usr/bin/env python3
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Run a recognizer using the Google Assistant Library.
+
+The Google Assistant Library has direct access to the audio API, so this Python
+code doesn't need to record audio. Hot word detection "OK, Google" is supported.
+
+It is available for Raspberry Pi 2/3 only; Pi Zero is not supported.
+"""
+
+import logging
+import platform
+import subprocess
+import sys
+
+from google.assistant.library.event import EventType
+
+from aiy.assistant import auth_helpers
+from aiy.assistant.library import Assistant
+from aiy.board import Board, Led
+from aiy.voice import tts
+
+def power_off_pi():
+    tts.say('Good bye!')
+    subprocess.call('sudo shutdown now', shell=True)
+
+
+def reboot_pi():
+    tts.say('See you in a bit!')
+    subprocess.call('sudo reboot', shell=True)
+
+
+def say_ip():
+    ip_address = subprocess.check_output("hostname -I | cut -d' ' -f1", shell=True)
+    tts.say('My IP address is %s' % ip_address.decode('utf-8'))
+
+
+def process_event(assistant, led, event):
+    logging.info(event)
+    if event.type == EventType.ON_START_FINISHED:
+        led.state = Led.BEACON_DARK  # Ready.
+        print('Say "OK, Google" then speak, or press Ctrl+C to quit...')
+    elif event.type == EventType.ON_CONVERSATION_TURN_STARTED:
+        led.state = Led.ON  # Listening.
+    elif event.type == EventType.ON_RECOGNIZING_SPEECH_FINISHED and event.args:
+        print('You said:', event.args['text'])
+        text = event.args['text'].lower()
+        if text == 'power off':
+            assistant.stop_conversation()
+            power_off_pi()
+        elif text == 'reboot':
+            assistant.stop_conversation()
+            reboot_pi()
+        elif text == 'ip address':
+            assistant.stop_conversation()
+            say_ip()
+    elif event.type == EventType.ON_END_OF_UTTERANCE:
+        led.state = Led.PULSE_QUICK  # Thinking.
+    elif (event.type == EventType.ON_CONVERSATION_TURN_FINISHED
+          or event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT
+          or event.type == EventType.ON_NO_RESPONSE):
+        led.state = Led.BEACON_DARK  # Ready.
+    elif event.type == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']:
+        sys.exit(1)
+
+
+def main():
+    logging.basicConfig(level=logging.INFO)
+
+    credentials = auth_helpers.get_assistant_credentials()
+    with Board() as board, Assistant(credentials) as assistant:
+        for event in assistant.start():
+            process_event(assistant, board.led, event)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/3_HeyRobot/voice/assistant_library_with_snowboy_demo.py b/3_HeyRobot/voice/assistant_library_with_snowboy_demo.py
new file mode 100644
index 0000000..8ff100f
--- /dev/null
+++ b/3_HeyRobot/voice/assistant_library_with_snowboy_demo.py
@@ -0,0 +1,135 @@
+#!/usr/bin/env python3
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Run a recognizer using the Google Assistant Library with button support.
+
+The Google Assistant Library has direct access to the audio API, so this Python
+code doesn't need to record audio. Hot word detection "OK, Google" is supported.
+
+It is available for Raspberry Pi 2/3 only; Pi Zero is not supported.
+"""
+
+import logging
+import platform
+import sys
+import threading
+
+import aiy.assistant.auth_helpers
+from aiy.assistant.library import Assistant
+import aiy.voicehat
+from google.assistant.library.event import EventType
+
+import mod.snowboydecoder as snowboydecoder
+
+if len(sys.argv) == 1:
+    print("Error: need to specify model name")
+    print("Usage: python demo.py your.model")
+    sys.exit(-1)
+
+model = sys.argv[1]
+
+logger = logging.getLogger("main")
+logging.basicConfig(
+    level=logging.INFO,
+    format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s"
+)
+
+
+class MyAssistant(object):
+    """An assistant that runs in the background.
+
+    The Google Assistant Library event loop blocks the running thread entirely.
+    To support the button trigger, we need to run the event loop in a separate
+    thread. Otherwise, the on_button_pressed() method will never get a chance to
+    be invoked.
+    """
+
+    def __init__(self):
+        self._task = threading.Thread(target=self._run_task)
+        self._hotword = threading.Thread(target=self._run_hotword)
+        self._can_start_conversation = False
+        self._assistant = None
+
+    def start(self):
+        """Starts the assistant.
+
+        Starts the assistant event loop and begin processing events.
+        """
+        self._task.start()
+        self._hotword.start()
+
+    def _run_task(self):
+        credentials = aiy.assistant.auth_helpers.get_assistant_credentials()
+        with Assistant(credentials) as assistant:
+            self._assistant = assistant
+            for event in assistant.start():
+                self._process_event(event)
+
+    def _run_hotword(self):
+        detector = snowboydecoder.HotwordDetector(model, sensitivity=0.5)
+        with aiy.audio.get_recorder():
+            while True:
+                if self._can_start_conversation:
+                    detector.start(detected_callback=self._on_button_pressed,
+                                   interrupt_check=lambda: not(self._can_start_conversation),
+                                   sleep_time=0.03)
+                    detector.terminate()
+
+    def _process_event(self, event):
+        logger.debug(event)
+        status_ui = aiy.voicehat.get_status_ui()
+        if event.type == EventType.ON_START_FINISHED:
+            status_ui.status('ready')
+            self._can_start_conversation = True
+            # Start the voicehat button trigger.
+            aiy.voicehat.get_button().on_press(self._on_button_pressed)
+            if sys.stdout.isatty():
+                print('Say "OK, Google" or press the button, then speak. '
+                      'Press Ctrl+C to quit...')
+
+        elif event.type == EventType.ON_CONVERSATION_TURN_STARTED:
+            self._can_start_conversation = False
+            status_ui.status('listening')
+
+        elif event.type == EventType.ON_END_OF_UTTERANCE:
+            status_ui.status('thinking')
+
+        elif (event.type == EventType.ON_CONVERSATION_TURN_FINISHED
+              or event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT
+              or event.type == EventType.ON_NO_RESPONSE):
+            status_ui.status('ready')
+            self._can_start_conversation = True
+
+        elif event.type == EventType.ON_ASSISTANT_ERROR and event.args and event.args['is_fatal']:
+            sys.exit(1)
+
+    def _on_button_pressed(self):
+        # Check if we can start a conversation. 'self._can_start_conversation'
+        # is False when either:
+        # 1. The assistant library is not yet ready; OR
+        # 2. The assistant library is already in a conversation.
+        if self._can_start_conversation:
+            self._assistant.start_conversation()
+
+
+def main():
+    if platform.machine() == 'armv6l':
+        print('Cannot run hotword demo on Pi Zero!')
+        exit(-1)
+    MyAssistant().start()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/3_HeyRobot/voice/light.py b/3_HeyRobot/voice/light.py
new file mode 100644
index 0000000..f4fd709
--- /dev/null
+++ b/3_HeyRobot/voice/light.py
@@ -0,0 +1,40 @@
+import RPi.GPIO as GPIO
+import time
+
+class Light(object):
+    def __init__(self, port):
+        self.port = port
+        GPIO.setmode(GPIO.BCM)
+        GPIO.setup(self.port, GPIO.OUT)
+        self.on_state = GPIO.HIGH
+        self.off_state = not self.on_state
+
+    def set_on(self):
+        GPIO.output(self.port, self.on_state)
+
+    def set_off(self):
+        GPIO.output(self.port, self.off_state)
+
+    def is_on(self):
+        return GPIO.input(self.port) == self.on_state
+
+    def is_off(self):
+        return GPIO.input(self.port) == self.off_state
+
+    def toggle(self):
+        if self.is_on():
+            self.set_off()
+        else:
+            self.set_on()
+
+    def blink(self, t=0.3):
+        self.set_off()
+        self.set_on()
+        time.sleep(t)
+        self.set_off()
+
+if __name__ == "__main__":
+    light = Light(17)
+    while True:
+        light.blink()
+        time.sleep(0.7)
diff --git a/3_HeyRobot/voice/voice_recorder.py b/3_HeyRobot/voice/voice_recorder.py
new file mode 100644
index 0000000..677d3a0
--- /dev/null
+++ b/3_HeyRobot/voice/voice_recorder.py
@@ -0,0 +1,36 @@
+import argparse
+import time
+import threading
+
+from aiy.board import Board
+from aiy.voice.audio import AudioFormat, play_wav, record_file, Recorder
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--filename', '-f', default='recording.wav')
+    args = parser.parse_args()
+
+    with Board() as board:
+        print('Press button to start recording.')
+        board.button.wait_for_press()
+
+        done = threading.Event()
+        board.button.when_pressed = done.set
+
+        def wait():
+            start = time.monotonic()
+            while not done.is_set():
+                duration = time.monotonic() - start
+                print('Recording: %.02f seconds [Press button to stop]' % duration)
+                time.sleep(0.5)
+
+        record_file(AudioFormat.CD, filename=args.filename, wait=wait, filetype='wav')
+        print('Press button to play recorded sound.')
+        board.button.wait_for_press()
+
+        print('Playing...')
+        play_wav(args.filename)
+        print('Done.')
+
+if __name__ == '__main__':
+    main()
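
Note: the signal_handler() and interrupt_callback() helpers added in assistant_grpc_demo_snowboy.py are defined but never hooked up, so detector.start() there runs with Snowboy's defaults. A minimal sketch of how they could be wired, reusing the interrupt_check/sleep_time parameters that assistant_library_with_snowboy_demo.py already passes to HotwordDetector.start(); this assumes the bundled mod.snowboydecoder keeps the upstream Snowboy API, and the print callback is an illustrative placeholder, not part of this change:

import signal

import mod.snowboydecoder as snowboydecoder

interrupted = False

def signal_handler(signum, frame):
    # Flip the flag; interrupt_check() returning True tells Snowboy to stop listening.
    global interrupted
    interrupted = True

def interrupt_callback():
    return interrupted

# Exit the hotword loop cleanly on Ctrl+C or SIGTERM instead of killing the process mid-read.
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)

detector = snowboydecoder.HotwordDetector('Hey Robot.pmdl', sensitivity=0.5)
detector.start(detected_callback=lambda: print('Hotword detected'),
               interrupt_check=interrupt_callback,
               sleep_time=0.03)
detector.terminate()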