// vosk_flutter_service 0.0.1
// Flutter plugin for offline speech recognition using the Vosk speech
// recognition toolkit.
// example/lib/main.dart
import 'dart:async';
import 'dart:io';
import 'package:file_picker/file_picker.dart';
import 'package:flutter/material.dart';
import 'package:path_provider/path_provider.dart';
import 'package:record/record.dart';
import 'package:vosk_flutter_service/vosk_flutter.dart';
/// Application entry point.
void main() => runApp(const MyApp());
/// Root widget: wraps the demo screen in a [MaterialApp].
class MyApp extends StatelessWidget {
  const MyApp({final Key? key}) : super(key: key);

  @override
  Widget build(final BuildContext context) {
    return const MaterialApp(
      home: VoskFlutterDemo(),
    );
  }
}
/// Demo screen showing streaming (Android) and file-based speech recognition.
class VoskFlutterDemo extends StatefulWidget {
  const VoskFlutterDemo({final Key? key}) : super(key: key);

  @override
  State<VoskFlutterDemo> createState() {
    return _VoskFlutterDemoState();
  }
}
class _VoskFlutterDemoState extends State<VoskFlutterDemo> {
  static const _textStyle = TextStyle(fontSize: 30, color: Colors.black);
  static const _modelName = 'vosk-model-small-en-us-0.15';
  static const _sampleRate = 16000;

  final VoskFlutterPlugin _vosk = VoskFlutterPlugin();
  final ModelLoader _modelLoader = ModelLoader();
  late final AudioRecorder _recorder;

  bool _recognitionStarted = false;
  String? _error;
  Model? _model;
  // Nullable because the recognizer is created asynchronously in _initModel.
  // Renamed from `recognizer` for consistency with the other private fields.
  Recognizer? _recognizer;
  SpeechService? _speechService;
  String? _fileRecognitionResult;

  @override
  void initState() {
    super.initState();
    _recorder = AudioRecorder();
    // Fire-and-forget: progress/errors are surfaced through state fields.
    unawaited(_initModel());
  }

  /// Downloads the Vosk model, builds the recognizer and, on Android only,
  /// the streaming speech service.
  ///
  /// Any failure is captured into [_error] and shown by [build].
  Future<void> _initModel() async {
    try {
      final modelsList = await _modelLoader.loadModelsList();
      final modelDescription = modelsList.firstWhere(
        (final model) => model.name == _modelName,
      );
      final modelPath =
          await _modelLoader.loadFromNetwork(modelDescription.url);
      final model = await _vosk.createModel(modelPath);
      // The widget may have been disposed while awaiting; calling setState
      // on a disposed State throws, so guard every post-await setState.
      if (!mounted) return;
      setState(() => _model = model);

      final localRecognizer = await _vosk.createRecognizer(
        model: model,
        sampleRate: _sampleRate,
      );
      if (!mounted) return;
      setState(() => _recognizer = localRecognizer);

      if (Platform.isAndroid) {
        final speechService = await _vosk.initSpeechService(localRecognizer);
        if (!mounted) return;
        setState(() => _speechService = speechService);
      }
    } on Object catch (e) {
      _setError(e.toString());
    }
  }

  @override
  Widget build(final BuildContext context) {
    // Render the initialization pipeline in order: error first, then each
    // not-yet-ready stage, and finally the platform-specific example UI.
    if (_error != null) {
      return Scaffold(
        body: Center(child: Text('Error: $_error', style: _textStyle)),
      );
    } else if (_model == null) {
      return const Scaffold(
        body: Center(child: Text('Loading model...', style: _textStyle)),
      );
    } else if (_recognizer == null) {
      return const Scaffold(
        body: Center(child: Text('Loading recognizer...', style: _textStyle)),
      );
    } else if (Platform.isAndroid && _speechService == null) {
      return const Scaffold(
        body: Center(
          child: Text('Initializing speech service...', style: _textStyle),
        ),
      );
    } else {
      return Platform.isAndroid ? _androidExample() : _commonExample();
    }
  }

  /// Android UI: toggles the streaming [SpeechService] and shows its
  /// partial/final result streams.
  ///
  /// Only reached from [build] after `_speechService` is non-null.
  Widget _androidExample() => Scaffold(
        body: Center(
          child: Column(
            mainAxisAlignment: MainAxisAlignment.center,
            children: [
              ElevatedButton(
                onPressed: () async {
                  if (_recognitionStarted) {
                    await _speechService!.stop();
                  } else {
                    await _speechService!.start();
                  }
                  if (!mounted) return;
                  setState(() => _recognitionStarted = !_recognitionStarted);
                },
                child: Text(
                  _recognitionStarted
                      ? 'Stop recognition'
                      : 'Start recognition',
                ),
              ),
              StreamBuilder(
                stream: _speechService!.onPartial(),
                builder: (final context, final snapshot) => Text(
                  'Partial result: ${snapshot.data}',
                  style: _textStyle,
                ),
              ),
              StreamBuilder(
                stream: _speechService!.onResult(),
                builder: (final context, final snapshot) => Text(
                  'Result: ${snapshot.data}',
                  style: _textStyle,
                ),
              ),
            ],
          ),
        ),
      );

  /// Non-Android UI: record to a WAV file or pick one, then recognize it.
  Widget _commonExample() => Scaffold(
        body: Center(
          child: Column(
            mainAxisAlignment: MainAxisAlignment.center,
            children: [
              ElevatedButton(
                onPressed: _recognitionStarted
                    ? () async {
                        await _stopRecording();
                        if (!mounted) return;
                        setState(() => _recognitionStarted = false);
                      }
                    : () async {
                        await _recordAudio();
                        if (!mounted) return;
                        setState(() => _recognitionStarted = true);
                      },
                child: Text(
                  _recognitionStarted ? 'Stop recording' : 'Record audio',
                ),
              ),
              Text(
                'Final recognition result: $_fileRecognitionResult',
                style: _textStyle,
              ),
              const SizedBox(height: 20),
              ElevatedButton(
                onPressed: () async {
                  final filePickerResult =
                      await FilePicker.platform.pickFiles();
                  // `path` can be null (e.g. cancelled pick); avoid the
                  // `!` assertion the original used.
                  final path = filePickerResult?.files.single.path;
                  if (path != null) {
                    await _recognizeFile(path);
                  }
                },
                child: const Text('Pick audio file'),
              ),
            ],
          ),
        ),
      );

  @override
  void dispose() {
    unawaited(_recorder.dispose());
    // NOTE(review): _speechService / _recognizer / _model may also need to be
    // stopped or disposed here — confirm against the vosk_flutter_service API.
    super.dispose();
  }

  /// Starts recording a mono 16 kHz WAV file into the temp directory.
  ///
  /// Matches [_sampleRate] so the recording can be fed to the recognizer.
  Future<void> _recordAudio() async {
    try {
      if (await _recorder.hasPermission()) {
        await _recorder.start(
          const RecordConfig(
            encoder: AudioEncoder.wav,
            sampleRate: _sampleRate,
            numChannels: 1,
          ),
          path: '${(await getTemporaryDirectory()).path}/audio.wav',
        );
      }
    } on Object catch (e) {
      _setError(e.toString());
    }
  }

  /// Stops the recorder and, if a file was produced, recognizes it.
  Future<void> _stopRecording() async {
    try {
      final path = await _recorder.stop();
      if (path != null) {
        await _recognizeFile(path);
      }
    } on Object catch (e) {
      _setError(e.toString());
    }
  }

  /// Runs file-based recognition on the audio at [path] and stores the
  /// final result in [_fileRecognitionResult].
  Future<void> _recognizeFile(final String path) async {
    try {
      // Read asynchronously: the original readAsBytesSync blocked the UI
      // thread for the duration of the file read.
      final bytes = await File(path).readAsBytes();
      final recognizer = _recognizer;
      if (recognizer == null) {
        _setError('Recognizer is not initialized');
        return;
      }
      await recognizer.acceptWaveformBytes(bytes);
      final result = await recognizer.getFinalResult();
      if (!mounted) return;
      setState(() => _fileRecognitionResult = result);
    } on Object catch (e) {
      _setError(e.toString());
    }
  }

  /// Records [error] so [build] switches to the error screen.
  void _setError(final String error) {
    if (!mounted) return;
    setState(() => _error = error);
  }
}