Open sayan801 opened 10 years ago
@aparnaSubr I checked your code — I think you are making a mistake while saving the audio file. It would help to learn more about the audio format itself. I found a sample project called "wave demo" that shows how to save .wav files correctly: https://sites.google.com/site/musicgapi/User-guide-v1-4-x/wave-demo
Start by choosing one specific audio format (AAC, MP3, or WAV) and get it working end-to-end; later you can extend the code to support multiple formats.
Wave Format Tutorial : https://ccrma.stanford.edu/courses/422/projects/WaveFormat/
/**
 * Writes the in-memory wave data to disk as a RIFF/WAVE file.
 *
 * <p>All multi-byte header fields are written little-endian, as required by the
 * WAVE file format. The header values are taken verbatim from
 * {@code wave.getWaveHeader()}; the raw sample bytes follow the "data" chunk header.
 *
 * @param filename path of the file to create; an existing file is overwritten.
 *                 Any {@link IOException} is logged and swallowed (best-effort
 *                 contract preserved from the original implementation).
 */
public void saveWaveAsFile(String filename){
    WaveHeader waveHeader = wave.getWaveHeader();
    // try-with-resources guarantees the stream is closed even when a write
    // fails part-way (the original leaked the FileOutputStream on IOException).
    try (FileOutputStream fos = new FileOutputStream(filename)) {
        // RIFF chunk descriptor
        fos.write(WaveHeader.RIFF_HEADER.getBytes());
        writeLittleEndianInt(fos, (int) waveHeader.getChunkSize());
        fos.write(WaveHeader.WAVE_HEADER.getBytes());
        // "fmt " sub-chunk
        fos.write(WaveHeader.FMT_HEADER.getBytes());
        writeLittleEndianInt(fos, (int) waveHeader.getSubChunk1Size());
        writeLittleEndianShort(fos, waveHeader.getAudioFormat());
        writeLittleEndianShort(fos, waveHeader.getChannels());
        writeLittleEndianInt(fos, waveHeader.getSampleRate());
        writeLittleEndianInt(fos, waveHeader.getByteRate());
        writeLittleEndianShort(fos, waveHeader.getBlockAlign());
        writeLittleEndianShort(fos, waveHeader.getBitsPerSample());
        // "data" sub-chunk
        fos.write(WaveHeader.DATA_HEADER.getBytes());
        writeLittleEndianInt(fos, (int) waveHeader.getSubChunk2Size());
        fos.write(wave.getBytes());
    } catch (IOException e) {
        // FileNotFoundException is a subclass of IOException, so a single
        // catch covers both cases the original handled separately.
        e.printStackTrace();
    }
}

/** Writes the low 32 bits of {@code value} little-endian (least significant byte first). */
private static void writeLittleEndianInt(FileOutputStream fos, int value) throws IOException {
    fos.write(new byte[] { (byte) value, (byte) (value >> 8),
            (byte) (value >> 16), (byte) (value >> 24) });
}

/** Writes the low 16 bits of {@code value} little-endian (least significant byte first). */
private static void writeLittleEndianShort(FileOutputStream fos, int value) throws IOException {
    fos.write(new byte[] { (byte) value, (byte) (value >> 8) });
}
@aparnaSubr Could you explain the logic you use to drain the encoded audio data? I have taken the excerpt from your code below:
// Encodes the bytes of inputFile to AAC and muxes them into an MP4 container.
//
// NOTE(review): the loop below feeds the *raw file bytes* straight into the AAC
// encoder as if they were 16-bit mono PCM. If filePath is a video/container
// file, its video data and container overhead are encoded as "samples" — this is
// why a 4-second clip produced ~46 seconds of audio. The input must be decoded
// to raw PCM (e.g. via MediaExtractor + a decoder MediaCodec) before this stage.
File inputFile = new File(filePath);
FileInputStream fis = new FileInputStream(inputFile);
File outputFile = new File(dstMediaPath);
if (outputFile.exists())
    outputFile.delete();

MediaMuxer mux = new MediaMuxer(outputFile.getAbsolutePath(), MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);

// Mono AAC-LC at the configured sample rate and bit rate.
MediaFormat outputFormat = MediaFormat.createAudioFormat(COMPRESSED_AUDIO_FILE_MIME_TYPE, SAMPLING_RATE, 1);
outputFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
outputFormat.setInteger(MediaFormat.KEY_BIT_RATE, COMPRESSED_AUDIO_FILE_BIT_RATE);

MediaCodec codec = MediaCodec.createEncoderByType(COMPRESSED_AUDIO_FILE_MIME_TYPE);
codec.configure(outputFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
codec.start();

ByteBuffer[] codecInputBuffers = codec.getInputBuffers();
ByteBuffer[] codecOutputBuffers = codec.getOutputBuffers();
MediaCodec.BufferInfo outBuffInfo = new MediaCodec.BufferInfo();

byte[] tempBuffer = new byte[BUFFER_SIZE];
boolean hasMoreData = true;
long presentationTimeUs = 0; // long, not double: timestamps are integral microseconds
int audioTrackIdx = 0;
long totalBytesRead = 0;
int percentComplete;

try {
    do {
        // ---- Feed input until the codec has no free buffer or the file ends ----
        int inputBufIndex = 0;
        while (inputBufIndex != -1 && hasMoreData) {
            inputBufIndex = codec.dequeueInputBuffer(CODEC_TIMEOUT_IN_MS);
            if (inputBufIndex >= 0) {
                ByteBuffer dstBuf = codecInputBuffers[inputBufIndex];
                dstBuf.clear();
                // BUG FIX: clamp the read length to tempBuffer's capacity. The
                // original used dstBuf.limit() alone, which throws
                // IndexOutOfBoundsException whenever the codec's input buffer
                // is larger than BUFFER_SIZE.
                int bytesRead = fis.read(tempBuffer, 0, Math.min(tempBuffer.length, dstBuf.limit()));
                if (bytesRead == -1) { // -1 implies EOS
                    hasMoreData = false;
                    codec.queueInputBuffer(inputBufIndex, 0, 0, presentationTimeUs, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                } else {
                    totalBytesRead += bytesRead;
                    dstBuf.put(tempBuffer, 0, bytesRead);
                    codec.queueInputBuffer(inputBufIndex, 0, bytesRead, presentationTimeUs, 0);
                    // Assumes 16-bit mono PCM: 2 bytes per frame at SAMPLING_RATE
                    // frames per second — TODO confirm the input really is PCM
                    // (see the review note above).
                    presentationTimeUs = 1000000L * (totalBytesRead / 2) / SAMPLING_RATE;
                }
            }
        }

        // ---- Drain all pending output from the encoder ----
        int outputBufIndex = 0;
        while (outputBufIndex != MediaCodec.INFO_TRY_AGAIN_LATER) {
            outputBufIndex = codec.dequeueOutputBuffer(outBuffInfo, CODEC_TIMEOUT_IN_MS);
            if (outputBufIndex >= 0) {
                ByteBuffer encodedData = codecOutputBuffers[outputBufIndex];
                encodedData.position(outBuffInfo.offset);
                encodedData.limit(outBuffInfo.offset + outBuffInfo.size);
                if ((outBuffInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0 && outBuffInfo.size != 0) {
                    // Codec-specific data (CSD) is already carried in the
                    // MediaFormat passed to addTrack(); it must not be written
                    // to the muxer as a regular sample.
                    codec.releaseOutputBuffer(outputBufIndex, false);
                } else {
                    mux.writeSampleData(audioTrackIdx, encodedData, outBuffInfo);
                    codec.releaseOutputBuffer(outputBufIndex, false);
                }
            } else if (outputBufIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
                // Delivered once, before any encoded data: register the track
                // and start the muxer here.
                outputFormat = codec.getOutputFormat();
                Log.v(TAG, "Output format changed - " + outputFormat);
                audioTrackIdx = mux.addTrack(outputFormat);
                mux.start();
            } else if (outputBufIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
                // BUG FIX: refresh the buffer array; the original kept using
                // the stale one after this notification.
                codecOutputBuffers = codec.getOutputBuffers();
                Log.e(TAG, "Output buffers changed during encode!");
            } else if (outputBufIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
                // No output available yet — fall out of the drain loop.
            } else {
                Log.e(TAG, "Unknown return code from dequeueOutputBuffer - " + outputBufIndex);
            }
        }

        percentComplete = (int) Math.round(((double) totalBytesRead / inputFile.length()) * 100.0);
        Log.v(TAG, "Conversion % - " + percentComplete);
        // BUG FIX: test EOS with a bitmask. The original used `!=`, which loops
        // forever if the EOS buffer carries any additional flag bits.
    } while ((outBuffInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) == 0);
} finally {
    // BUG FIX: release everything even if encoding throws; the original
    // leaked fis, the codec, and the muxer on any exception.
    fis.close();
    codec.stop();
    codec.release();
    mux.stop();
    mux.release();
}
This new code has an issue: I extracted a 4-second video and it generated 46 seconds of audio. I will look into that.