run method

@override
Future<void> run()

This application uses Vosk speech-to-text to listen to audio from the host microphone, convert it to text, and send the text to the Frame in real time. It runs a main loop in this function, with a corresponding loop running on the Frame (frame_app.lua).
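
The _recognizer used in the implementation below is assumed to be a Vosk recognizer created during app initialisation. A minimal sketch of that setup, assuming the vosk_flutter plugin's model/recognizer API; the asset path and the 16 kHz sample rate are illustrative assumptions, not taken from this app:

// Illustrative setup only - names, paths and parameters are assumptions.
import 'package:vosk_flutter/vosk_flutter.dart';

Future<Recognizer> initRecognizer() async {
  final vosk = VoskFlutterPlugin.instance();

  // Load a small English model bundled as an asset (hypothetical path).
  final modelPath = await ModelLoader()
      .loadFromAssets('assets/models/vosk-model-small-en-us-0.15.zip');

  final model = await vosk.createModel(modelPath);

  // 16 kHz mono PCM is what Vosk models typically expect.
  return await vosk.createRecognizer(model: model, sampleRate: 16000);
}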

Implementation

@override
Future<void> run() async {
  currentState = ApplicationState.running;
  _partialResult = '';
  _finalResult = '';
  if (mounted) setState(() {});

  try {
    var (ok, audioRecorder, audioSampleBufferedStream) = await startAudio();
    if (!ok) {
      currentState = ApplicationState.ready;
      if (mounted) setState(() {});
      return;
    }

    String prevText = '';

    // loop over the incoming audio data and send results to Frame
    await for (var audioSample in audioSampleBufferedStream!) {
      // if the user has clicked Stop we want to jump out of the main loop and stop processing
      if (currentState != ApplicationState.running) {
        break;
      }

      // recognizer blocks until it has something
      final resultReady = await _recognizer
          .acceptWaveformBytes(Uint8List.fromList(audioSample));

      // TODO consider enabling alternatives, and word times, and ...?
      String text = resultReady
          ? jsonDecode(await _recognizer.getResult())['text']
          : jsonDecode(await _recognizer.getPartialResult())['partial'];

      // If the text is the same as the previous one, don't send it to Frame or force a redraw.
      // The recognizer often produces a run of empty strings in a row, so this means we send
      // the first one (which clears the display) but not subsequent ones.
      // The final result often matches the last partial, so if it's a final result show it
      // on the phone but don't send it to Frame.
      if (text == prevText) {
        if (resultReady) {
          setState(() {
            _finalResult = text;
            _partialResult = '';
          });
        }
        continue;
      } else if (text.isEmpty) {
        // Turn the empty string into a single space and send it: an empty string
        // can't go through the wrapped-text-chunked-sender because a zero-byte
        // payload means no message would be sent.
        // Users might find this first empty partial arrives a bit soon, clearing
        // the display a little earlier than they'd like (although audio doesn't
        // hang in the air after words are spoken either!)
        await frame!.sendMessage(TxPlainText(msgCode: 0x0b, text: ' '));
        prevText = '';
        continue;
      }

      if (_log.isLoggable(Level.FINE)) {
        _log.fine('Recognized text: $text');
      }

      // send current text to Frame
      String wrappedText = TextUtils.wrapText(text, 640, 4).join('\n');
      await frame!.sendMessage(TxPlainText(msgCode: 0x0b, text: wrappedText));

      // update the phone UI too
      setState(
          () => resultReady ? _finalResult = text : _partialResult = text);
      prevText = text;
    }

    await stopAudio(audioRecorder!);
  } catch (e) {
    _log.fine('Error executing application logic: $e');
  }

  currentState = ApplicationState.ready;
  if (mounted) setState(() {});
}
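
The branching around duplicate, empty, and final results in the loop above can be hard to follow inline. As an illustration only (not part of this app's code), the same decision could be factored into a small pure-Dart helper; the enum and function names here are hypothetical:

// Hypothetical restatement of the send/skip decision made in the loop above.
enum TextAction { skip, clearDisplay, send }

TextAction classify(String text, String prevText) {
  if (text == prevText) {
    // Duplicate partials/finals are not re-sent to Frame; a final result that
    // matches the last partial is only reflected in the phone UI.
    return TextAction.skip;
  }
  if (text.isEmpty) {
    // The first empty partial clears the Frame display (sent as a single space).
    return TextAction.clearDisplay;
  }
  // New non-empty text is wrapped and sent to Frame.
  return TextAction.send;
}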