Closed: virtyos closed this issue 2 years ago
Getting this error every time when I'm trying to listen to sounds:
exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: IsFormatSampleRateAndChannelCountValid(format)'
This happens only on iOS (12, debug mode); on Android everything works fine.
my app code:
import 'package:flutter/material.dart';
import 'dart:async';
import 'dart:developer';
import 'package:tflite_audio/tflite_audio.dart';
import 'package:flutter_unity_widget/flutter_unity_widget.dart';
import 'package:flutter/services.dart';
import 'package:ripple_button/ripple_button.dart';
void main() {
WidgetsFlutterBinding.ensureInitialized();
SystemChrome.setEnabledSystemUIMode(SystemUiMode.manual,
overlays: [SystemUiOverlay.bottom]);
SystemChrome.setPreferredOrientations(
[DeviceOrientation.portraitUp, DeviceOrientation.portraitDown]).then((_) {
runApp(MyApp());
});
}
class MyApp extends StatefulWidget {
const MyApp({Key? key}) : super(key: key);
@override
_MyAppState createState() => _MyAppState();
}
class _MyAppState extends State<MyApp> with WidgetsBindingObserver {
final GlobalKey<ScaffoldState> _scaffoldKey = GlobalKey<ScaffoldState>();
final isRecording = ValueNotifier<bool>(false);
Stream<Map<dynamic, dynamic>>? result;
final String model = 'assets/decoded_wav_model.tflite';
final String label = 'assets/decoded_wav_label.txt';
final String audioDirectory = 'assets/sample_audio_16k_mono.wav';
final String inputType = 'decodedWav';
final int sampleRate = 16000;
final int bufferSize = 2000;
// final int audioLength = 16000;
///Optional parameters you can adjust to modify your input and output
final bool outputRawScores = false;
final int numOfInferences = 5;
final int numThreads = 1;
final bool isAsset = true;
///Adjust the values below when tuning model detection.
final double detectionThreshold = 0.3;
final int averageWindowDuration = 1000;
final int minimumTimeBetweenSamples = 30;
final int suppressionTime = 1500;
UnityWidgetController? unityWidgetController; // nullable: Unity may not be created yet when lifecycle callbacks fire
String listenResult = '';
late double screenWidth;
late double screenHeight;
final double labelPadding = 20.0;
final double labelMargin = 20.0;
@override
void initState() {
super.initState();
WidgetsBinding.instance.addObserver(this);
TfliteAudio.loadModel(
// numThreads: this.numThreads,
// isAsset: this.isAsset,
// outputRawScores: outputRawScores,
inputType: inputType,
model: model,
label: label,
);
//spectrogram parameters
// TfliteAudio.setSpectrogramParameters(nFFT: 256, hopLength: 129);
// mfcc parameters
TfliteAudio.setSpectrogramParameters(nMFCC: 40, hopLength: 16384);
}
@override
void dispose() {
//unityWidgetController?.dispose();
// ignore: avoid_print
print('Dispose used');
WidgetsBinding.instance.removeObserver(this);
super.dispose();
}
@override
void didChangeAppLifecycleState(AppLifecycleState state) {
print("STATE " + state.toString());
switch (state) {
case AppLifecycleState.resumed:
print("RESUMED");
unityWidgetController?.resume();
break;
case AppLifecycleState.paused:
print("PAUSED");
unityWidgetController?.pause();
break;
default:
}
}
@override
Widget build(BuildContext context) {
return MaterialApp(
home: WillPopScope(
onWillPop: () {
unityWidgetController?.quit();
return Future.value(true);
},
child: Scaffold(
key: _scaffoldKey,
///Streambuilder for inference results
body: Builder(builder: (BuildContext context) {
screenWidth = MediaQuery.of(context).size.width;
screenHeight = MediaQuery.of(context).size.height;
return _container();
}))));
}
Widget _container() {
return Column(children: [
_unityBlock(),
_labelBlock(),
_listenButtonBlock(),
]);
}
Widget _listenButtonBlock() {
return Container(
width: screenWidth - labelMargin * 2,
margin: EdgeInsets.only(top: labelMargin, bottom: labelMargin),
child: Center(
child: RippleButton(
"Нажми и скажи UP",
padding: EdgeInsets.all(16),
isEnabled: !isRecording.value,
type: RippleButtonType.BLUE_TELEGRAM,
onPressed: () {
isRecording.value = true;
setState(() {});
_listen();
},
style: RippleButtonStyle(height: 60.0),
color: RippleButtonColor(
foreground: Colors.blue,
background: Colors.blue,
overlay: Colors.blue),
),
),
);
}
void _listen() {
result = TfliteAudio.startAudioRecognition(
sampleRate: sampleRate,
bufferSize: bufferSize,
numOfInferences: numOfInferences,
);
result?.listen((event) {
listenResult = event["recognitionResult"].toString();
log("Recognition Result: " + event["recognitionResult"].toString());
TfliteAudio.stopAudioRecognition();
}).onDone(() async {
isRecording.value = false;
if (listenResult == 'up') {
unityWidgetController?.postMessage('Player1', 'Jump', '');
await Future.delayed(const Duration(milliseconds: 1900), () {
unityWidgetController?.postMessage('Player1', 'JumpOff', '');
});
}
setState(() {});
});
}
Widget _labelBlock() {
return Container(
child: Center(
child: Text(
_labelText(),
textAlign: TextAlign.center,
style:
TextStyle(height: 1.4, fontSize: 20.0, fontWeight: FontWeight.bold),
)),
width: screenWidth - labelMargin * 2,
height: screenHeight / 4,
padding: EdgeInsets.all(labelPadding),
margin: EdgeInsets.only(top: labelMargin, bottom: labelMargin),
decoration: BoxDecoration(
borderRadius: BorderRadius.circular(10),
border: Border.all(color: Colors.blueAccent),
color: Colors.white,
boxShadow: [
BoxShadow(color: Colors.blue, spreadRadius: 1),
],
),
);
}
String _labelText() {
print('RESULT ' + listenResult.toString());
if (isRecording.value) {
return 'I am listening...';
}
if (listenResult == '_silence_') {
return 'It seems you are silent. I have no time to wait for you! Hurry up and say UP!';
}
if (listenResult == '_unknown_') {
return "I don't understand what you're saying :(";
}
if (listenResult == 'up') {
return 'Hooray, you said UP';
}
if (listenResult != '') {
return "What are you talking about?! Just say UP";
}
return 'Press the button below, say UP, and I will jump';
}
Widget _unityBlock() {
return Container(
width: screenWidth,
height: screenHeight / 2,
child: UnityWidget(
//borderRadius: BorderRadius.all(Radius.circular(70)),
onUnityCreated: onUnityCreated,
useAndroidViewSurface: true,
),
);
}
void onUnityCreated(UnityWidgetController controller) async {
print("PLAYER CREATED");
unityWidgetController = controller;
/*bool? isLoad = await unityWidgetController.isLoaded();
print("IS LOADED " + isLoad.toString());
bool? isReady = await unityWidgetController.isReady();
print("IS READY " + isReady.toString());
await unityWidgetController.pause();
print(controller.hashCode);
print(unityWidgetController);
print("UNITY CREATE");
Future.delayed(
Duration(milliseconds: 100),
() async {
await this.unityWidgetController.resume();
setState(() {
});
},
);*/
}
}
Hi @virtyos
That's quite an unusual error.
To help diagnose your problem, can you share the full device logs, and check whether the plugin's example app runs correctly on your device?
Stream arguments: ["method": setAudioRecognitionStream, "suppressionTime": 0, "numOfInferences": 5, "minimumTimeBetweenSamples": 0, "audioLength": 0, "sampleRate": 16000, "bufferSize": 2000, "detectionThreshold": 0.3, "averageWindowDuration": 0]
AudioLength: 16000
Permission granted
start microphone
2022-07-31 17:32:35.763752+0300 Runner[295:8249] flutter: RESULT
2022-07-31 17:32:35.556164+0300 Runner[295:8219] [aurioc] 1029: failed: -10851 (enable 1, outf< 2 ch, 0 Hz, Float32, non-inter> inf< 2 ch, 0 Hz, Float32, non-inter>)
2022-07-31 17:32:35.800125+0300 Runner[295:8219] [avae] AVAEInternal.h:70:_AVAE_Check: required condition is false: [AVAEGraphNode.mm:857:CreateRecordingTap: (IsFormatSampleRateAndChannelCountValid(format))]
2022-07-31 17:32:35.822019+0300 Runner[295:8219] Uncaught exception: com.apple.coreaudio.avfaudio: required condition is false: IsFormatSampleRateAndChannelCountValid(format)
(
0 CoreFoundation 0x000000019a76b198 <redacted> + 252
1 libobjc.A.dylib 0x00000001999439f8 objc_exception_throw + 56
2 CoreFoundation 0x000000019a68488c <redacted> + 0
3 AVFAudio 0x00000001a05e1244 <redacted> + 56
4 AVFAudio 0x00000001a05e087c <redacted> + 284
5 AVFAudio 0x00000001a0637384 <redacted> + 244
6 AVFAudio 0x00000001a0613e70 <redacted> + 252
7 AVFAudio 0x00000001a068c91c <redacted> + 16
8 AVFAudio 0x00000001a067bbe8 <redacted> + 228
9 Runner 0x0000000100539994 $s12tflite_audio9RecordingC5startyyF + 1164
10 Runner 0x000000010054bc7c $s12tflite_audio22SwiftTfliteAudioPluginC15startMicrophoneyyFyycfU_ + 972
11 Runner 0x0000000100549ff8 $sIeg_IeyB_TR + 52
12 libdispatch.dylib 0x000000010935b6f4 _dispatch_call_block_and_release + 24
13 libdispatch.dylib 0x000000010935cc78 _dispatch_client_callout + 16
14 libdispatch.dylib 0x0000000109364bf4 _dispatch_lane_serial_drain + 712
15 libdispatch.dylib 0x00000001093658b4 _dispatch_lane_invoke + 456
16 libdispatch.dylib 0x000000010936f77c _dispatch_workloop_worker_thread + 1148
17 libsystem_pthread.dylib 0x000000019a38a114 _pthread_wqthread + 304
18 libsystem_pthread.dylib 0x000000019a38ccd4 start_wqthread + 4
)
2022-07-31 17:32:35.823325+0300 Runner[295:8219] *** Terminating app due to uncaught exception 'com.apple.coreaudio.avfaudio', reason: 'required condition is false: IsFormatSampleRateAndChannelCountValid(format)'
*** First throw call stack:
(0x19a76b180 0x1999439f8 0x19a68488c 0x1a05e1244 0x1a05e087c 0x1a0637384 0x1a0613e70 0x1a068c91c 0x1a067bbe8 0x100539994 0x10054bc7c 0x100549ff8 0x10935b6f4 0x10935cc78 0x109364bf4 0x1093658b4 0x10936f77c 0x19a38a114 0x19a38ccd4)
libc++abi.dylib: terminating with uncaught exception of type NSException
The plugin's example app runs correctly.
Hi, many thanks for the responses. They're very helpful.
A hypothesis: are you using any other plugin to record audio? If so, I suspect there's a conflict between this plugin and the other one on iOS. I believe the other plugin is using AVFAudio in conjunction with the tflite_audio plugin, hence the error. According to here, only one audio tap can be run/installed at a time.
If this is the case, is it possible to run both plugins sequentially, rather than in parallel?
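If both plugins must live in one app, a minimal sketch of that sequential approach is below, written as a method on the _MyAppState class above. It assumes that pausing Unity via flutter_unity_widget releases the iOS audio session so tflite_audio can install its recording tap; neither plugin documents that guarantee, and listenSequentially is a hypothetical helper name, not an API of either plugin.

Future<void> listenSequentially() async {
  // Assumption: pausing Unity frees the iOS audio session so that
  // tflite_audio can install its own recording tap. flutter_unity_widget
  // does not document this, so treat it as a starting point, not a fix.
  await unityWidgetController?.pause();

  final result = TfliteAudio.startAudioRecognition(
    sampleRate: sampleRate,
    bufferSize: bufferSize,
    numOfInferences: 1,
  );

  // Drain the stream so recognition has fully finished (and the native
  // recorder has stopped) before handing audio back to Unity.
  await for (final event in result) {
    listenResult = event['recognitionResult'].toString();
  }

  await unityWidgetController?.resume();
}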
Yes, you are right. I use flutter_unity_widget (https://github.com/juicycleff/flutter-unity-view-widget); the app works correctly if I disable that plugin.
Thanks for your help.
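For anyone isolating a similar conflict, one way to disable the Unity widget in a test build, without removing the dependency, is a compile-time flag. This is only a sketch: the ENABLE_UNITY define and kEnableUnity constant are made-up names, not part of either plugin.

// Hypothetical switch: flutter run --dart-define=ENABLE_UNITY=false
const bool kEnableUnity =
    bool.fromEnvironment('ENABLE_UNITY', defaultValue: true);

Widget _unityBlock() {
  if (!kEnableUnity) {
    // Placeholder keeps the layout intact while Unity (and its audio) is off.
    return SizedBox(width: screenWidth, height: screenHeight / 2);
  }
  return Container(
    width: screenWidth,
    height: screenHeight / 2,
    child: UnityWidget(
      onUnityCreated: onUnityCreated,
      useAndroidViewSurface: true,
    ),
  );
}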