felixjunghans / google_speech

Flutter Google Speech
MIT License
69 stars 42 forks source link

Mic Stream Example Bug #24

Closed YuxuanShah closed 2 years ago

YuxuanShah commented 2 years ago

I have this problem when using the microphone to stream (see the attached image). Below is my code:


import 'dart:async';

import 'package:flutter/material.dart';
import 'package:flutter/services.dart';
import 'package:google_speech/google_speech.dart';
import 'package:rxdart/rxdart.dart';
import 'package:sound_stream/sound_stream.dart';

/// Application entry point.
void main() => runApp(MyApp());

/// Root widget of the Mic Stream example application.
class MyApp extends StatelessWidget {
  /// Creates the app root; a `const` constructor with a [Key] lets
  /// callers construct this widget as a compile-time constant.
  const MyApp({Key? key}) : super(key: key);

  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      title: 'Mic Stream Example',
      theme: ThemeData(
        primarySwatch: Colors.blue,
        visualDensity: VisualDensity.adaptivePlatformDensity,
      ),
      home: const AudioRecognize(),
    );
  }
}

/// Screen that records microphone audio and streams it to the
/// Google Speech-to-Text API, showing interim and final transcripts.
class AudioRecognize extends StatefulWidget {
  /// Creates the recognition screen; [key] is forwarded to the framework.
  const AudioRecognize({Key? key}) : super(key: key);

  @override
  State<StatefulWidget> createState() => _AudioRecognizeState();
}

class _AudioRecognizeState extends State<AudioRecognize> {
  final RecorderStream _recorder = RecorderStream();

  /// Whether a recognition session is currently running.
  bool recognizing = false;

  /// Whether at least one transcript has been received.
  bool recognizeFinished = false;

  /// The transcript shown in the UI.
  String text = '';

  // Nullable (instead of `late`) so that dispose()/stopRecording() are safe
  // to call even if streamingRecognize() never ran; only non-null while a
  // session is active.
  StreamSubscription<List<int>>? _audioStreamSubscription;
  BehaviorSubject<List<int>>? _audioStream;

  @override
  void initState() {
    super.initState();

    _recorder.initialize();
  }

  @override
  void dispose() {
    // Tear down the mic subscription and audio stream if the widget is
    // removed while a session is still running; otherwise they leak.
    _audioStreamSubscription?.cancel();
    _audioStream?.close();
    super.dispose();
  }

  /// Starts streaming microphone audio to the Speech-to-Text API and
  /// updates [text] as interim and final results arrive.
  ///
  /// Returns a [Future] so callers may await setup; the method remains
  /// assignable to [VoidCallback] for button handlers.
  Future<void> streamingRecognize() async {
    final audioStream = BehaviorSubject<List<int>>();
    _audioStream = audioStream;
    // Forward raw PCM chunks from the recorder into the request stream.
    _audioStreamSubscription = _recorder.audioStream.listen(audioStream.add);

    await _recorder.start();

    setState(() {
      recognizing = true;
    });

    final serviceAccount = ServiceAccount.fromString(
        await rootBundle.loadString('assets/test_service_account.json'));
    final speechToText = SpeechToText.viaServiceAccount(serviceAccount);
    final config = _getConfig();

    final responseStream = speechToText.streamingRecognize(
        StreamingRecognitionConfig(config: config, interimResults: true),
        audioStream);

    var responseText = '';

    responseStream.listen((data) {
      // Streaming responses may carry an empty result list; guard before
      // touching `first` to avoid a StateError.
      if (data.results.isEmpty || !mounted) return;

      final currentText =
          data.results.map((e) => e.alternatives.first.transcript).join('\n');

      if (data.results.first.isFinal) {
        responseText += '\n$currentText';
        setState(() {
          text = responseText;
          recognizeFinished = true;
        });
      } else {
        setState(() {
          text = '$responseText\n$currentText';
          recognizeFinished = true;
        });
      }
    }, onError: (Object error) {
      // Surface gRPC/auth failures (e.g. a malformed service-account file)
      // instead of crashing with an unhandled stream error.
      if (!mounted) return;
      setState(() {
        text = 'Error: $error';
        recognizing = false;
      });
    }, onDone: () {
      if (!mounted) return;
      setState(() {
        recognizing = false;
      });
    });
  }

  /// Stops recording and releases the audio pipeline.
  Future<void> stopRecording() async {
    await _recorder.stop();
    await _audioStreamSubscription?.cancel();
    await _audioStream?.close();
    _audioStreamSubscription = null;
    _audioStream = null;
    setState(() {
      recognizing = false;
    });
  }

  /// Recognition settings: LINEAR16 at 16 kHz with automatic punctuation.
  /// The sample rate must match what [RecorderStream] produces —
  /// TODO confirm against the sound_stream configuration.
  RecognitionConfig _getConfig() => RecognitionConfig(
      encoding: AudioEncoding.LINEAR16,
      model: RecognitionModel.basic,
      enableAutomaticPunctuation: true,
      sampleRateHertz: 16000,
      languageCode: 'en-US');

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: const Text('Audio File Example'),
      ),
      body: Center(
        child: Column(
          mainAxisAlignment: MainAxisAlignment.spaceAround,
          children: <Widget>[
            if (recognizeFinished)
              _RecognizeContent(
                text: text,
              ),
            // RaisedButton was removed from Flutter; ElevatedButton is its
            // direct replacement.
            ElevatedButton(
              onPressed: recognizing ? stopRecording : streamingRecognize,
              child: recognizing
                  ? const Text('Stop recording')
                  : const Text('Start Streaming from mic'),
            ),
          ],
        ),
      ), // This trailing comma makes auto-formatting nicer for build methods.
    );
  }
}

/// Displays the transcript returned by the Google Speech API under a
/// fixed caption.
class _RecognizeContent extends StatelessWidget {
  const _RecognizeContent({Key? key, required this.text}) : super(key: key);

  /// The recognized transcript to render.
  final String text;

  @override
  Widget build(BuildContext context) {
    final transcriptStyle = Theme.of(context).textTheme.bodyText1;

    return Padding(
      padding: const EdgeInsets.all(16.0),
      child: Column(
        children: <Widget>[
          const Text(
            'The text recognized by the Google Speech Api:',
          ),
          const SizedBox(height: 16.0),
          Text(text, style: transcriptStyle),
        ],
      ),
    );
  }
}
felixjunghans commented 2 years ago

Hi, I have just updated the Mic Stream example, and it looks like the error no longer occurs.

Mic Stream example

Feel free to reopen this issue if you have any further questions

akhilgorantala commented 1 year ago

I/AudioRecord(18421): start(5206): return status 0 E/flutter (18421): [ERROR:flutter/runtime/dart_vm_initializer.cc(41)] Unhandled Exception: FormatException: Unexpected end of input (at character 1) E/flutter (18421): E/flutter (18421): ^ E/flutter (18421): E/flutter (18421): #0 _ChunkedJsonParser.fail (dart:convert-patch/convert_patch.dart:1383:5) E/flutter (18421): #1 _ChunkedJsonParser.close (dart:convert-patch/convert_patch.dart:501:7) E/flutter (18421): #2 _parseJson (dart:convert-patch/convert_patch.dart:36:10) E/flutter (18421): #3 JsonDecoder.convert (dart:convert/json.dart:610:36) E/flutter (18421): #4 JsonCodec.decode (dart:convert/json.dart:216:41) E/flutter (18421): #5 jsonDecode (dart:convert/json.dart:155:10) E/flutter (18421): #6 new ServiceAccountAuthenticator (package:grpc/src/auth/authio.dart:30:11) E/flutter (18421): #7 new ServiceAccount. (package:google_speech/speech_client_authenticator.dart:35:26) E/flutter (18421): #8 new ServiceAccount.fromString (package:google_speech/speech_client_authenticator.dart:73:27) E/flutter (18421): #9 _AudioRecognizeState.streamingRecognize (package:untitled4/main.dart:64:43) E/flutter (18421): E/flutter (18421): D/AudioRecordExtImpl(18421): setAudioBoostTid D/AudioRecordExtImpl(18421): has setAudioBoostTid D/AudioRecordExtImpl(18421): setAudioReadBoostTid