# Flutter Piano Audio Detection

Flutter piano audio detection implemented with a TensorFlow Lite model (Google Magenta Onsets and Frames).

To keep this project alive, consider giving it a star. Contributors are also welcome.
## Getting Started

First, add the TensorFlow Lite model file to your project by copying the downloaded `onsets_frames_wavinput.tflite` into place for each platform:

- Android: copy the file to `YourApp/android/app/src/main/assets`.
- iOS: in Xcode, go to Navigator -> Build Phases -> Copy Bundle Resources and add the file.
If you have experience installing other plugins, the rest of the setup should be very simple.

### iOS

Add the microphone usage description to `ios/Runner/Info.plist`:

```xml
<key>NSMicrophoneUsageDescription</key>
<string>Your Text</string>
```
Then, in `ios/Podfile`, set the platform version and enable the microphone permission macro:

```ruby
platform :ios, '12.1' # or a higher version

# ...

post_install do |installer|
  installer.pods_project.targets.each do |target|
    target.build_configurations.each do |config|
      config.build_settings['GCC_PREPROCESSOR_DEFINITIONS'] ||= [
        '$(inherited)',
        ## dart: PermissionGroup.microphone
        'PERMISSION_MICROPHONE=1',
      ]
    end
  end
end
```
### Android

Add the record-audio permission to `android/app/src/main/AndroidManifest.xml`:

```xml
<uses-permission android:name="android.permission.RECORD_AUDIO" />
```

In `android/app/build.gradle`, inside the `android { }` block, keep the `.tflite` model uncompressed:

```groovy
aaptOptions {
    noCompress 'tflite'
}
```
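The `PERMISSION_MICROPHONE=1` macro above follows the setup used by the permission_handler plugin. As a minimal sketch, assuming you add permission_handler to your app (it is not part of this plugin), you can request microphone access at runtime before starting detection:

```dart
import 'package:permission_handler/permission_handler.dart';

/// Illustrative helper, not part of flutter_piano_audio_detection:
/// requests microphone access and reports whether it was granted.
Future<bool> ensureMicrophonePermission() async {
  final status = await Permission.microphone.request();
  return status.isGranted;
}
```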
Please look at the example app to see how to implement these features.
## Install

Add the dependency to your `pubspec.yaml`:

```yaml
dependencies:
  flutter_piano_audio_detection: ${version}
```
## Usage in Flutter Code
```dart
import 'package:flutter_piano_audio_detection/flutter_piano_audio_detection.dart';

// ...

class _MyAppState extends State<MyApp> {
  FlutterPianoAudioDetection fpad = FlutterPianoAudioDetection();
  Stream<List<dynamic>>? result;
  List<String> notes = [];

  // ...

  @override
  void initState() {
    super.initState();
    fpad.prepare(); // Load the TFLite model before recognition starts.
  }

  void start() {
    fpad.start(); // Start Engine
    getResult();  // Event Subscription
  }

  void stop() {
    fpad.stop(); // Stop Engine
  }

  void getResult() {
    result = fpad.startAudioRecognition();
    result!.listen((event) {
      setState(() {
        notes = fpad.getNotes(event); // e.g. notes = [C3, D3]
      });
    });
  }

  // ...
}
```
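For context, here is a minimal sketch of wiring these calls into the UI, assuming the `MyApp`/`_MyAppState` structure above; the button layout and labels are illustrative, not part of the plugin:

```dart
// Inside _MyAppState; requires package:flutter/material.dart.
@override
Widget build(BuildContext context) {
  return MaterialApp(
    home: Scaffold(
      appBar: AppBar(title: const Text('Piano Audio Detection')),
      body: Center(
        child: Column(
          mainAxisAlignment: MainAxisAlignment.center,
          children: [
            Text("Detected notes: ${notes.join(', ')}"),
            const SizedBox(height: 16),
            ElevatedButton(onPressed: start, child: const Text('Start')),
            ElevatedButton(onPressed: stop, child: const Text('Stop')),
          ],
        ),
      ),
    ),
  );
}
```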
## License

MIT