alphacep / vosk-flutter

Apache License 2.0
50 stars 30 forks source link

Whenever I transcribe audio to text using this package it gives me an empty JSON result — how do I solve this? #25

Open kuberanb opened 5 months ago

kuberanb commented 5 months ago

`import 'dart:developer'; import 'dart:io'; import 'dart:typed_data'; import 'package:flutter/services.dart'; import 'package:vosk_flutter/vosk_flutter.dart';

class VoskTranscriber {
  late VoskFlutterPlugin _vosk;
  Recognizer? _recognizer;
  // Promoted from locals so failures/state survive past initialization.
  SpeechService? _speechService;
  String? _error;

  /// Resolves once [initializeVosk] completes; awaited by [transcribeAudio]
  /// so transcription never races model loading.
  late final Future<void> _ready;

  VoskTranscriber() {
    _ready = initializeVosk();
  }

  /// Loads the bundled Vosk model and creates the recognizer.
  ///
  /// The original `.then(...)` chain had two fatal bugs that produced the
  /// reported empty-JSON output: `.then((model) => model = model)` assigned
  /// the created model to the closure parameter instead of a field, so the
  /// recognizer was built from a null model; and `.then(() => ...)` passed a
  /// zero-argument callback where `then` requires one argument. Plain
  /// async/await avoids both classes of bug.
  Future<void> initializeVosk() async {
    const sampleRate = 16000;
    print("_initializeVosk called");
    _vosk = VoskFlutterPlugin.instance();
    try {
      // Copies the model out of the asset bundle and yields its on-disk path.
      final modelPath = await ModelLoader()
          .loadFromAssets('assets/models/vosk-model-small-en-us-0.15.zip');
      final model = await _vosk.createModel(modelPath);
      _recognizer =
          await _vosk.createRecognizer(model: model, sampleRate: sampleRate);
      if (Platform.isAndroid) {
        // The plugin's speech service is Android-only.
        _speechService = await _vosk.initSpeechService(_recognizer!);
        print("sampleRate : $sampleRate");
      }
    } catch (e) {
      // Surface the failure instead of silently dropping it in a local.
      _error = e.toString();
      log("vosk initialization failed: $_error");
    }
  }

  /// Feeds [audioBytes] to the recognizer in 8 KiB chunks and returns the
  /// concatenated JSON results (partials included, matching the original
  /// behavior).
  ///
  /// Assumes the audio is 16 kHz 16-bit mono PCM as the small-en-us model
  /// expects — TODO confirm the caller supplies that format.
  ///
  /// Throws [StateError] if initialization failed, rather than crashing on
  /// a null recognizer.
  Future transcribeAudio(Uint8List audioBytes) async {
    print("transcribeAudio function called");
    // Wait for the model/recognizer; calling before init finished was a
    // latent null-dereference in the original.
    await _ready;
    final recognizer = _recognizer;
    if (recognizer == null) {
      throw StateError('Vosk recognizer failed to initialize: $_error');
    }

    final results = <String>[];
    const chunkSize = 8192;
    var pos = 0;

    while (pos + chunkSize < audioBytes.length) {
      // sublistView avoids copying each 8 KiB chunk (sublist allocates).
      final resultReady = await recognizer.acceptWaveformBytes(
        Uint8List.sublistView(audioBytes, pos, pos + chunkSize),
      );
      pos += chunkSize;

      if (resultReady) {
        final result = await recognizer.getResult();
        log("complete result in vosk : $result");
        results.add(result);
      } else {
        // Partial results are interim progress, not final text.
        final result = await recognizer.getPartialResult();
        log("partial result in vosk : $result");
        results.add(result);
      }
    }

    // Flush the trailing (< chunkSize) tail and collect the final result.
    await recognizer
        .acceptWaveformBytes(Uint8List.sublistView(audioBytes, pos));
    results.add(await recognizer.getFinalResult());

    return results.join(' ');
  }
}

log

[log] partial result in vosk : { "partial" : "" } [log] complete result in vosk : { "text" : "" } 15 [log] partial result in vosk : { "partial" : "" } I/flutter (10378): transcribedText : { I/flutter (10378): "partial" : "" I/flutter (10378): } { I/flutter (10378): "partial" : "" I/flutter (10378): } {

ulisseshen commented 4 months ago

Same here. I am trying the example project in this repo.

EDIT

The problem is caused by the emulator. On a real device it worked fine. Good luck with your solution.

kuberanb commented 4 months ago

@ulisseshen final speechService = await vosk.initSpeechService(recognizer); speechService.onPartial().forEach((partial) => print(partial)); speechService.onResult().forEach((result) => print(result)); await speechService.start(); I used this code and now it works.