abhishekove / Teachable

A Flutter package to use Google Teachable Machine models, e.g. to make a pose classifier.
https://pub.dev/packages/teachable
MIT License

Unable to use this package for audio model #2

Open vyk1262 opened 1 year ago

vyk1262 commented 1 year ago

Is it because for the pose model mp[element["className"]] = element["probability"] is used, but for the audio model the result contains only result.scores? Can you please check main.dart.

import 'dart:convert';

import 'package:flutter/material.dart';
import 'package:permission_handler/permission_handler.dart';
import 'package:teachable/teachable.dart';

void main() async {
  WidgetsFlutterBinding.ensureInitialized();

  await Permission.camera.request();
  await Permission.microphone.request();
  runApp(MyApp());
}

class MyApp extends StatelessWidget {
  @override
  Widget build(BuildContext context) {
    return MaterialApp(
      title: 'Flutter Demo',
      theme: ThemeData(
        primarySwatch: Colors.blue,
      ),
      home: MyHomePage(title: 'Flutter Demo Home Page'),
    );
  }
}

class MyHomePage extends StatefulWidget {
  MyHomePage({Key? key, required this.title}) : super(key: key);

  final String title;

  @override
  _MyHomePageState createState() => _MyHomePageState();
}

class _MyHomePageState extends State<MyHomePage> {
  String pose = "";
  @override
  Widget build(BuildContext context) {
    return Scaffold(
        appBar: AppBar(title: Text("Audio Classifier")),
        body: Stack(
          children: [
            Container(
                child: Column(children: <Widget>[
              Expanded(
                child: Container(
                  child: Teachable(
                    path: "assets/original.html",
                    results: (res) {
                      var resp = jsonDecode(res);
                      setState(() {
                        pose = (resp[1] * 100.0).toString();
                      });
                    },
                  ),
                ),
              ),
            ])),
            Align(
              alignment: Alignment.bottomCenter,
              child: Container(
                  width: double.infinity,
                  height: 50,
                  decoration: BoxDecoration(
                    color: Colors.black.withOpacity(0.5),
                  ),
                  child: Column(
                    mainAxisAlignment: MainAxisAlignment.spaceBetween,
                    children: [
                      Row(
                        mainAxisAlignment: MainAxisAlignment.spaceEvenly,
                        children: [
                          Text(
                            "background noise",
                            style: TextStyle(
                              color: Colors.white,
                            ),
                          ),
                          Text(
                            pose,
                            style: TextStyle(
                              color: Colors.white,
                            ),
                          ),
                        ],
                      ),
                    ],
                  )),
            )
          ],
        ));
  }
}

and index.html

<div>Teachable Machine Audio Model</div>
<button type="button" onclick="init()">Start</button>
<div id="label-container"></div>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow/tfjs@1.3.1/dist/tf.min.js"></script>
<script src="https://cdn.jsdelivr.net/npm/@tensorflow-models/speech-commands@0.4.0/dist/speech-commands.min.js"></script>

<script type="text/javascript">
    // more documentation available at
    // https://github.com/tensorflow/tfjs-models/tree/master/speech-commands

    // the link to your model provided by Teachable Machine export panel
    // const URL = "./my_model/";
    const URL = "https://teachablemachine.withgoogle.com/models/1VpiPU9J_/";

    async function createModel() {
        const checkpointURL = URL + "model.json"; // model topology
        const metadataURL = URL + "metadata.json"; // model metadata

        const recognizer = speechCommands.create(
            "BROWSER_FFT", // fourier transform type, not useful to change
            undefined, // speech commands vocabulary feature, not useful for your models
            checkpointURL,
            metadataURL);

        // check that model and metadata are loaded via HTTPS requests.
        await recognizer.ensureModelLoaded();

        return recognizer;
    }

    async function init() {
        const recognizer = await createModel();
        const classLabels = recognizer.wordLabels(); // get class labels
        const labelContainer = document.getElementById("label-container");
        for (let i = 0; i < classLabels.length; i++) {
            labelContainer.appendChild(document.createElement("div"));
        }

        // listen() takes two arguments:
        // 1. A callback function that is invoked anytime a word is recognized.
        // 2. A configuration object with adjustable fields
        recognizer.listen(result => {
            const scores = result.scores; // probability of prediction for each class
            const regScores = Array.from(scores); // plain JS array, serializes as a JSON list
            // render the probability scores per class
            console.log(scores);
            for (let i = 0; i < classLabels.length; i++) {
                const classPrediction = classLabels[i] + ": " + result.scores[i].toFixed(2);
                labelContainer.childNodes[i].innerHTML = classPrediction;
                // console.log(classPrediction)
            }
            try {
                // forward the plain score array to Flutter via the InAppWebView handler
                window.flutter_inappwebview.callHandler('updater', regScores);
            } catch (e) {
                // ignore errors if the Flutter handler is not registered yet
            }
        }, {
            includeSpectrogram: true, // in case listen should return result.spectrogram
            probabilityThreshold: 0.75,
            invokeCallbackOnNoiseAndUnknown: true,
            overlapFactor: 0.50 // probably want between 0.5 and 0.75. More info in README
        });

        // Stop the recognition in 5 seconds.
        // setTimeout(() => recognizer.stopListening(), 5000);
    }
</script>
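In short, the pose/image model and the audio model hand very different payloads to the Dart results callback, which is why the pose-style handler fails here. A minimal Dart sketch of both shapes (the pose payload keys are inferred from the className/probability mapping mentioned above, and the audio payload is assumed to be the plain score list forwarded by the updater handler):

import 'dart:convert';

// Sketch only: how the decoded payload differs between model types.
// Pose/image model: a JSON object mapping class name -> probability,
//   e.g. {"Class 1": 0.97, "Class 2": 0.03}.
// Audio model: a JSON list of probabilities ordered like recognizer.wordLabels(),
//   e.g. [0.92, 0.05, 0.03] -- the payload carries no class names.
void handleResult(String res) {
  final resp = jsonDecode(res);
  if (resp is Map<String, dynamic>) {
    // pose/image payload: labels arrive together with the scores
    resp.forEach((label, prob) => print('$label: $prob'));
  } else if (resp is List) {
    // audio payload: only index-based scores; labels must be known separately
    for (var i = 0; i < resp.length; i++) {
      print('class $i: ${resp[i]}');
    }
  }
}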
vyk1262 commented 1 year ago

Change the onWebViewCreated callback in the Teachable custom widget like this:

onWebViewCreated: (InAppWebViewController controller) async {
  var _webViewController = controller;
  _webViewController.addJavaScriptHandler(
      handlerName: "updater",
      callback: (args) {
        List<dynamic> predictions = args[0];
        widget.results!(JsonEncoder().convert(predictions));
      });
},
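This works because the arguments that the page passes to window.flutter_inappwebview.callHandler arrive in the addJavaScriptHandler callback as decoded JSON, so the plain array built with Array.from(result.scores) on the JavaScript side shows up here as a List<dynamic>.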

and use it as below:

Teachable(
  path: "assets/original.html",
  results: (res) {
    var resp = jsonDecode(res);
    setState(() {
      score = (resp[0] * 100.0).toString();
      print(score);
    });
  },
),

With the above changes I am able to print the probabilities of the classes.
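For reference, a small sketch of how the decoded score list could be matched back to class labels on the Flutter side. formatScores is a hypothetical helper, and the label order is assumed to follow recognizer.wordLabels() in the HTML above, with "background noise" first:

import 'dart:convert';

// Hypothetical helper: pairs the score list sent by the 'updater' handler
// with hard-coded class labels and formats the values as percentages.
String formatScores(String res, List<String> labels) {
  final List<dynamic> scores = jsonDecode(res);
  final lines = <String>[];
  for (var i = 0; i < labels.length && i < scores.length; i++) {
    lines.add('${labels[i]}: ${(scores[i] * 100).toStringAsFixed(1)}%');
  }
  return lines.join('\n');
}

// Possible usage inside the results callback:
// results: (res) => setState(() {
//   score = formatScores(res, ['background noise', 'class 1', 'class 2']);
// }),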