The code is as follows; it's the code from the official documentation:
import 'package:flutter/material.dart';
import 'package:speech_to_text/speech_recognition_result.dart';
import 'package:speech_to_text/speech_to_text.dart';

class MyPage extends StatefulWidget {
  MyPage({Key? key}) : super(key: key);

  @override
  _MyPageState createState() => _MyPageState();
}

class _MyPageState extends State<MyPage> {
  SpeechToText _speechToText = SpeechToText();
  bool _speechEnabled = false;
  String _lastWords = '';

  @override
  void initState() {
    super.initState();
    _initSpeech();
  }

  /// This has to happen only once per app
  void _initSpeech() async {
    _speechEnabled = await _speechToText.initialize();
    setState(() {});
  }

  /// Each time to start a speech recognition session
  void _startListening() async {
    await _speechToText.listen(onResult: _onSpeechResult);
    setState(() {});
  }

  /// Manually stop the active speech recognition session
  /// Note that there are also timeouts that each platform enforces
  /// and the SpeechToText plugin supports setting timeouts on the
  /// listen method.
  void _stopListening() async {
    await _speechToText.stop();
    setState(() {});
  }

  /// This is the callback that the SpeechToText plugin calls when
  /// the platform returns recognized words.
  void _onSpeechResult(SpeechRecognitionResult result) {
    setState(() {
      _lastWords = result.recognizedWords;
    });
  }

  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(
        title: Text('Speech Demo'),
      ),
      body: Center(
        child: Column(
          mainAxisAlignment: MainAxisAlignment.center,
          children: <Widget>[
            Container(
              padding: EdgeInsets.all(16),
              child: Text(
                'Recognized words:',
                style: TextStyle(fontSize: 20.0),
              ),
            ),
            Expanded(
              child: Container(
                padding: EdgeInsets.all(16),
                child: Text(
                  // If listening is active show the recognized words
                  _speechToText.isListening
                      ? _lastWords
                      // If listening isn't active but could be tell the user
                      // how to start it, otherwise indicate that speech
                      // recognition is not yet ready or not supported on
                      // the target device
                      : _speechEnabled
                          ? 'Tap the microphone to start listening...'
                          : 'Speech not available',
                ),
              ),
            ),
          ],
        ),
      ),
      floatingActionButton: FloatingActionButton(
        onPressed:
            // If not yet listening for speech start, otherwise stop
            _speechToText.isNotListening ? _startListening : _stopListening,
        tooltip: 'Listen',
        child: Icon(_speechToText.isNotListening ? Icons.mic_off : Icons.mic),
      ),
    );
  }
}
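Since the doc comment on _stopListening mentions that the plugin supports setting timeouts on the listen method, here is a minimal sketch of what that could look like. It assumes the plugin's classic named parameters (listenFor, pauseFor, partialResults, localeId); newer versions of speech_to_text bundle these into a SpeechListenOptions object instead, so check the version you have installed:

// A minimal sketch of listen with explicit timeouts, assuming the
// classic speech_to_text API (newer versions take a SpeechListenOptions
// object instead of these named parameters).
void _startListeningWithTimeouts() async {
  await _speechToText.listen(
    onResult: _onSpeechResult,
    listenFor: Duration(seconds: 30), // hard cap on the whole session
    pauseFor: Duration(seconds: 3),   // stop after this much silence
    partialResults: true,             // stream interim results to onResult
    localeId: 'en_US',                // optional: pin the recognition locale
  );
  setState(() {});
}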
The error is reported as follows:
W/Bundle (11745): Key results_recognition expected ArrayList<String> but value was a java.lang.String. The default value <null> was returned.
W/Bundle (11745): Attempt to cast generated internal exception:
W/Bundle (11745): java.lang.ClassCastException: java.lang.String cannot be cast to java.util.ArrayList
W/Bundle (11745): at android.os.BaseBundle.getStringArrayList(BaseBundle.java:1288)
W/Bundle (11745): at android.os.Bundle.getStringArrayList(Bundle.java:1062)
W/Bundle (11745): at com.csdcorp.speech_to_text.SpeechToTextPlugin.updateResults(SpeechToTextPlugin.kt:444)
W/Bundle (11745): at com.csdcorp.speech_to_text.SpeechToTextPlugin.onResults(SpeechToTextPlugin.kt:706)
W/Bundle (11745): at android.speech.SpeechRecognizer$InternalListener$1.handleMessage(SpeechRecognizer.java:457)
W/Bundle (11745): at android.os.Handler.dispatchMessage(Handler.java:106)
W/Bundle (11745): at android.os.Looper.loop(Looper.java:207)
W/Bundle (11745): at android.app.ActivityThread.main(ActivityThread.java:6878)
W/Bundle (11745): at java.lang.reflect.Method.invoke(Native Method)
W/Bundle (11745): at com.android.internal.os.RuntimeInit$MethodAndArgsCaller.run(RuntimeInit.java:547)
W/Bundle (11745): at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:876)
Interesting, this means that the Android speech libraries are unexpectedly returning a single string instead of an array. I've never seen this behaviour. What Android version and device is this on?
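In the meantime, one way to make failures like this more visible on the Dart side is to register error and status callbacks when initializing and turn on debug logging. This is a minimal diagnostic sketch, not a fix for the platform-side cast; it assumes the onError, onStatus, and debugLogging parameters that initialize accepts in current plugin versions:

/// A minimal sketch for diagnosis: surfaces recognizer errors and status
/// changes on the Dart side so the failure above is easier to pin down.
void _initSpeech() async {
  _speechEnabled = await _speechToText.initialize(
    onError: (error) => print(
        'speech error: ${error.errorMsg}, permanent: ${error.permanent}'),
    onStatus: (status) => print('speech status: $status'),
    debugLogging: true, // extra plugin-side logs in logcat
  );
  setState(() {});
}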
Can anyone help me with this? Thank you!