goodatlas / react-native-audio-record

Audio record buffers for React Native (iOS and Android)

TypeError: undefined is not an object (evaluating '_reactNativeAudioRecord.AudioRecord.init') #40

Open rohith2k opened 4 years ago

rohith2k commented 4 years ago

My code is almost the same as App1.js, but I'm getting the warning `TypeError: undefined is not an object (evaluating '_reactNativeAudioRecord.AudioRecord.init')` when I call `init`, and similar warnings for the other calls (`start()`, `stop()`, etc.). I'm also getting `undefined` back from the `stop()` call, which should return the path of the audio file.

My code:

```js
import React, { Component } from 'react';
import { PermissionsAndroid } from 'react-native';
import AudioRecord from 'react-native-audio-record';
import Sound from 'react-native-sound';

class App extends Component {
  sound = null;
  state = {
    audioFile: '',
    recording: false,
    loaded: false,
    paused: true
  };

  async componentDidMount() {
    await this.checkPermission();
    const options = {
      sampleRate: 16000,
      channels: 1,
      bitsPerSample: 16,
      audioSource: 1,
    };
    AudioRecord.init(options);
    // AudioRecord.on('data', data => {
    //   const chunk = Buffer.from(data, 'base64');
    //   console.log('chunk size', chunk.byteLength);
    //   // do something with audio chunk
    // });
  }

  checkPermission = async () => {
    try {
      const granted = await PermissionsAndroid.requestMultiple(
        [
          PermissionsAndroid.PERMISSIONS.READ_EXTERNAL_STORAGE,
          PermissionsAndroid.PERMISSIONS.WRITE_EXTERNAL_STORAGE,
          PermissionsAndroid.PERMISSIONS.RECORD_AUDIO
        ],
        {
          title: 'Audio Recording Permission',
          message: 'Permission to record audio',
          buttonNeutral: 'Ask Me Later',
          buttonNegative: 'Cancel',
          buttonPositive: 'OK'
        }
      );
      // Note: requestMultiple resolves to an object keyed by permission,
      // so comparing it directly to RESULTS.GRANTED will not behave as intended.
      if (granted === PermissionsAndroid.RESULTS.GRANTED) {
        console.log('You can record audio');
      } else {
        console.log('Audio recording permission denied');
      }
    } catch (err) {
      console.warn(err);
    }
  };

  start = async () => {
    console.log('start record');
    this.setState({ audioFile: '', recording: true, loaded: false });
    AudioRecord.start();
  };

  stop = async () => {
    if (!this.state.recording) return;
    console.log('stop record');
    let audioFile;
    try {
      audioFile = await AudioRecord.stop();
    } catch (err) {
      console.warn(err);
    }
    console.log('audioFile', audioFile);
    this.setState({ audioFile, recording: false });
  };

  load = () => {
    return new Promise((resolve, reject) => {
      if (!this.state.audioFile) {
        return reject('file path is empty');
      }
      this.sound = new Sound(this.state.audioFile, '', error => {
        if (error) {
          console.log('failed to load the file', error);
          return reject(error);
        }
        this.setState({ loaded: true });
        return resolve();
      });
    });
  };

  play = async () => {
    if (!this.state.loaded) {
      try {
        await this.load();
      } catch (error) {
        console.log(error);
      }
    }
    this.setState({ paused: false });
    Sound.setCategory('Playback');
    this.sound.play(success => {
      if (success) {
        console.log('successfully finished playing');
      } else {
        console.log('playback failed due to audio decoding errors');
      }
      this.setState({ paused: true });
      // this.sound.release();
    });
  };

  pause = () => {
    this.sound.pause();
    this.setState({ paused: true });
  };

  render() {
    const { recording, paused, audioFile } = this.state;
    return (
```
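For what it's worth, the error text itself narrows this down: `_reactNativeAudioRecord.AudioRecord` is how Babel compiles a *named* import (`import { AudioRecord } from 'react-native-audio-record'`), while the package documents only a default export, so a named binding comes back `undefined`. A missing native module (app not rebuilt after installing the package, or running inside Expo Go) fails in a very similar way. A minimal sketch of both checks, assuming the default export and the native-module name `RNAudioRecord` used by this library's JS wrapper:

```js
import { NativeModules } from 'react-native';
import AudioRecord from 'react-native-audio-record'; // default import, not { AudioRecord }

// Rough sanity check before calling init/start/stop.
if (!AudioRecord || typeof AudioRecord.init !== 'function') {
  // The JS wrapper itself is undefined: usually an import-style or bundler issue.
  console.warn('AudioRecord JS module not found - check the import statement');
} else if (!NativeModules.RNAudioRecord) {
  // Assumed native-module name. If this is undefined, the native side was never
  // linked/built: reinstall pods (cd ios && pod install) and rebuild the app.
  console.warn('react-native-audio-record native module not linked');
} else {
  AudioRecord.init({ sampleRate: 16000, channels: 1, bitsPerSample: 16, audioSource: 1 });
}
```

Either way, reloading JavaScript is not enough after installing the package; the native binary has to be rebuilt before `init` can resolve.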

Darkmift commented 3 years ago

Start by removing code that's irrelevant to the problem.

yakovd33 commented 3 years ago

> Start by removing code that's irrelevant to the problem.

The same happens for me; can you help?

```js
import { StatusBar } from 'expo-status-bar';
import React from 'react';
import { StyleSheet, Text, View } from 'react-native';
import AudioRecord from 'react-native-audio-record';
import { Audio } from 'expo-av';

Audio.requestPermissionsAsync();

export default function App() {
  const options = {
    sampleRate: 16000,  // default 44100
    channels: 1,        // 1 or 2, default 1
    bitsPerSample: 16,  // 8 or 16, default 16
    audioSource: 6,     // android only
    wavFile: 'test.wav' // default 'audio.wav'
  };

  // The code that's not working:
  // Error msg: TypeError: Cannot read property 'init' of undefined
  AudioRecord.init(options);

  // Start Recording
  AudioRecord.start();

  // Stop Recording
  AudioRecord.stop();

  return (
    <View>
      <Text>Open up App.js to start working on your app!</Text>
      <StatusBar style="auto" />
    </View>
  );
}
```
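This snippet imports from `expo-status-bar` and `expo-av`, which suggests a managed Expo project. Inside Expo Go, custom native modules such as `react-native-audio-record` are not compiled into the client, so the module resolves to `undefined` no matter how it is imported; either build a development client (e.g. `npx expo run:android`) so the native code is included, or record through `expo-av`, which is already imported here. A sketch of the latter, assuming a recent `expo-av` (older SDKs name the preset `Audio.RECORDING_OPTIONS_PRESET_HIGH_QUALITY`):

```js
import { Audio } from 'expo-av';

// Record a clip with expo-av instead of react-native-audio-record.
export async function recordWithExpoAv() {
  await Audio.requestPermissionsAsync();
  // Required on iOS before recording can start.
  await Audio.setAudioModeAsync({ allowsRecordingIOS: true, playsInSilentModeIOS: true });

  const { recording } = await Audio.Recording.createAsync(
    Audio.RecordingOptionsPresets.HIGH_QUALITY
  );

  // ...record for as long as needed, then:
  await recording.stopAndUnloadAsync();
  return recording.getURI(); // local file URI of the recorded clip
}
```

Note that this produces a compressed container file (m4a/caf by default), not the raw PCM/WAV buffers that `react-native-audio-record` streams, so it only fits use cases where a file URI is enough.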

alanwjlu commented 3 years ago

Did anyone figure this out? I am having the same issue.

raulgonzalezdev commented 1 year ago

After applying the recommendations above, the log now shows the following error message:

```
LOG  This is the error: {"err": "User: arn:aws:sts::649856891400:assumed-role/amplify-allymobile-dev-82957-unauthRole/CognitoIdentityCredentials is not authorized to perform: transcribe:StartStreamTranscriptionWebSocket because no identity-based policy allows the transcribe:StartStreamTranscriptionWebSocket action"}
```

Attached config:

```js
/* eslint-disable */
// WARNING: DO NOT EDIT. This file is automatically generated by AWS Amplify. It will be overwritten.

const awsmobile = {
  "aws_project_region": "us-east-1",
  "aws_cognito_identity_pool_id": "us-east-1:75f67dbf-ab80-45bc-9fb3-df7013a96567",
  "aws_cognito_region": "us-east-1",
  "aws_user_pools_id": "us-east-1_0apKjplE2",
  "aws_user_pools_web_client_id": "7753944gfoetpm9nnq1758olob",
  "oauth": {},
  "aws_cognito_username_attributes": [],
  "aws_cognito_social_providers": [],
  "aws_cognito_signup_attributes": ["EMAIL"],
  "aws_cognito_mfa_configuration": "OFF",
  "aws_cognito_mfa_types": ["SMS"],
  "aws_cognito_password_protection_settings": { "passwordPolicyMinLength": 8, "passwordPolicyCharacters": [] },
  "aws_cognito_verification_mechanisms": ["EMAIL"],
  "predictions": {
    "convert": {
      "transcription": { "region": "us-east-1", "proxy": false, "defaults": { "language": "en-GB" } },
      "speechGenerator": { "region": "us-east-1", "proxy": false, "defaults": { "VoiceId": "Ricardo", "LanguageCode": "pt-BR" } }
    }
  },
  "Statement": [
    {
      "Effect": "Allow",
      "Sid": "transcribestreaming",
      "Action": [
        "translate:TranslateText", "polly:SynthesizeSpeech", "transcribe:StartStreamTranscriptionWebSocket",
        "comprehend:DetectSentiment", "comprehend:DetectEntities", "comprehend:DetectDominantLanguage",
        "comprehend:DetectSyntax", "comprehend:DetectKeyPhrases",
        "rekognition:DetectFaces", "rekognition:RecognizeCelebrities", "rekognition:DetectLabels",
        "rekognition:DetectModerationLabels", "rekognition:DetectText", "rekognition:DetectLabel",
        "textract:AnalyzeDocument", "textract:DetectDocumentText", "textract:GetDocumentAnalysis",
        "textract:StartDocumentAnalysis", "textract:StartDocumentTextDetection",
        "rekognition:SearchFacesByImage", "transcribe:*"
      ],
      "Resource": ["*"]
    }
  ]
};

export default awsmobile;
```

sepeechAlly-template.json:

```json
{
  "AWSTemplateFormatVersion": "2010-09-09",
  "Description": "{\"createdOn\":\"Linux\",\"createdBy\":\"Amplify\",\"createdWith\":\"9.2.1\",\"stackType\":\"predictions-Polly\",\"metadata\":{}}",
  "Parameters": {
    "authRoleName": { "Type": "String" },
    "unauthRoleName": { "Type": "String" },
    "convertPolicyName": { "Type": "String" },
    "convertType": { "Type": "String" },
    "access": { "Type": "String" },
    "resourceName": { "Type": "String" },
    "language": { "Type": "String" },
    "voice": { "Type": "String" },
    "env": { "Type": "String" }
  },
  "Conditions": {
    "AuthGuestRoleAccess": { "Fn::Equals": [ { "Ref": "access" }, "authAndGuest" ] }
  },
  "Outputs": {
    "region": { "Value": { "Fn::FindInMap": [ "RegionMapping", { "Ref": "AWS::Region" }, { "Ref": "convertType" } ] } },
    "language": { "Value": { "Ref": "language" } },
    "voice": { "Value": { "Ref": "voice" } }
  },
  "Resources": {
    "PollyPolicy": {
      "Type": "AWS::IAM::Policy",
      "Properties": {
        "PolicyName": { "Ref": "convertPolicyName" },
        "Roles": {
          "Fn::If": [
            "AuthGuestRoleAccess",
            [ { "Ref": "authRoleName" }, { "Ref": "unauthRoleName" } ],
            [ { "Ref": "authRoleName" } ]
          ]
        },
        "PolicyDocument": {
          "Version": "2012-10-17",
          "Statement": [
            {
              "Effect": "Allow",
              "Sid": "transcribestreaming",
              "Action": [ "polly:SynthesizeSpeech", "transcribe:*", "transcribe:StartStreamTranscriptionWebSocket" ],
              "Resource": "*"
            }
          ]
        }
      }
    }
  },
  "Mappings": {
    "RegionMapping": {
      "us-east-1": {
        "speechGenerator": "us-east-1",
        "transcribe": "us-east-1",
        "translate": "us-east-1",
        "transcribestreaming": "us-east-1"
      }
    }
  }
}
```
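The Transcribe error above looks like an IAM problem rather than an audio one: the `Statement` block pasted into `aws-exports.js` has no effect because that file is generated client configuration, and the CloudFormation policy shown here only reaches the unauth role when the `access` parameter is `authAndGuest` and the stack has actually been pushed. One way to unblock it, assuming streaming transcription is the only missing action, is to attach a policy like the following to the Cognito unauth role (`amplify-allymobile-dev-82957-unauthRole`) via IAM or an Amplify override; this is a minimal sketch, not the policy Amplify generates:

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "AllowStreamingTranscription",
      "Effect": "Allow",
      "Action": "transcribe:StartStreamTranscriptionWebSocket",
      "Resource": "*"
    }
  ]
}
```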