awslabs / amazon-kinesis-video-streams-producer-c


Fragment decoding error: "There was an error decoding the media. Verify that the stream contains valid content." #143

Closed yibin-lin closed 3 years ago

yibin-lin commented 3 years ago

Hi, I want to push an H.264 video stream and an AAC audio stream to the Amazon (KVS) server, but I keep getting the error in the title. If I push only the H.264 video stream, it displays successfully (see the picture below), so I suspect my AAC audio stream is causing the server-side decode error. I have verified the AAC from the IPC camera by writing it to a file, and it plays normally in a player. I have also tried changing the AAC timestamps, but no matter what I try, I still hit this error. From the client side I can't see what I did wrong, so I would really appreciate your help. Below are my code and the printed error messages; thank you very much for taking the time to analyze where the problem is.

This is a screenshot of pushing the video stream alone: [screenshot: video-only stream plays successfully]

This is the log output when the video stream and the audio stream are pushed at the same time. It contains both messages I added and messages generated by the SDK interface functions:

[screenshots: log output]

The following picture is the media information displayed on the Amazon server when the video stream and audio stream are pushed together:

[screenshots: media information shown in the KVS console]

Below is the code I wrote:

```c
#include <com/amazonaws/kinesis/video/cproducer/Include.h>
#include "ffkvsc_module.h"
#include "ffavs_audio.h"

#ifdef __cplusplus
extern "C" {
#endif

#define DEFAULT_RETENTION_PERIOD            2 * HUNDREDS_OF_NANOS_IN_AN_HOUR
#define DEFAULT_BUFFER_DURATION             120 * HUNDREDS_OF_NANOS_IN_A_SECOND
#define DEFAULT_CALLBACK_CHAIN_COUNT        5
#define DEFAULT_KEY_FRAME_INTERVAL          45
#define DEFAULT_FPS_VALUE                   25
#define DEFAULT_STREAM_DURATION             20 * HUNDREDS_OF_NANOS_IN_A_SECOND
#define DEFAULT_STORAGE_SIZE                16 * 1024 * 1024
#define RECORDED_FRAME_AVG_BITRATE_BIT_PS   3800000
#define SAMPLE_AUDIO_FRAME_DURATION         (/*20*/ 128 * HUNDREDS_OF_NANOS_IN_A_MILLISECOND) // 1024/8000
#define SAMPLE_VIDEO_FRAME_DURATION         (HUNDREDS_OF_NANOS_IN_A_SECOND / DEFAULT_FPS_VALUE)
#define AUDIO_TRACK_SAMPLING_RATE           /*48000*/ 8000
#define AUDIO_TRACK_CHANNEL_CONFIG          2
#define FILE_LOGGING_BUFFER_SIZE            (100 * 1024)
#define MAX_NUMBER_OF_LOG_FILES             5

static ffss_thread_t gs_kvsc_stream_task;
static ffss_thread_t gs_kvsc_video_task;
static ffss_thread_t gs_kvsc_audio_task;
static ffstream_t gs_kvsc_video_stream;
static ffstream_t gs_kvsc_audio_stream;
bool gs_brun = false;

typedef struct {
    PBYTE buffer;
    UINT32 size;
} FrameData, *PFrameData;

typedef struct {
    ATOMIC_BOOL firstVideoFramePut;
    UINT64 streamStopTime;
    UINT64 streamStartTime;
    STREAM_HANDLE streamHandle;
    CLIENT_HANDLE clientHandle;
    FrameData audioFrames[NUMBER_OF_AAC_FRAME_FILES];
    FrameData videoFrames[NUMBER_OF_H264_FRAME_FILES];
} SampleCustomData, *PSampleCustomData;

SampleCustomData g_data;

// Forward declaration of the default thread sleep function
VOID defaultThreadSleep(UINT64);

STATUS ffkvsc_read(PFrame pFrame, ffstream_t* pHndl)
{
    frame_info_t* vinfo = NULL;
    STATUS retStatus = STATUS_SUCCESS;

    CHK(pFrame != NULL, STATUS_NULL_ARG);
    CHK(pHndl != NULL, STATUS_NULL_ARG);

    // Get the size and read into frame
    if (ffstream_read(pHndl, &vinfo) != 0) {
        return STATUS_READ_FILE_FAILED;
    }
    pFrame->size = vinfo->u32FrmSize;
    pFrame->frameData = (PBYTE) FFSS_DATA_NEXT(vinfo);
    if (UNLIKELY(vinfo->stVideo.bIframe)) {
        pFrame->flags = FRAME_FLAG_KEY_FRAME;
    } else {
        pFrame->flags = FRAME_FLAG_NONE;
    }
    if (pFrame->flags == FRAME_FLAG_KEY_FRAME) {
        // kvs_inf("Key frame size %" PRIu64, pFrame->size);
    }

CleanUp:
    return retStatus;
}

PVOID PutVideoFrameThread(PVOID args)
{
    ffss_thread_set_name("KVS-Video");
    STATUS retStatus = STATUS_SUCCESS;
    Frame frame;
    STATUS status;
    UINT32 frameIndex = 0;
    int result;

    if (ffstream_open(&gs_kvsc_video_stream, SHM_STREAM_1_KEY) != 0) {
        kvs_err("video stream open failed!!!");
        retStatus = STATUS_OPEN_FILE_FAILED;
        goto CleanUp;
    }

    frame.version = FRAME_CURRENT_VERSION;
    frame.trackId = DEFAULT_VIDEO_TRACK_ID;
    frame.duration = 0;
    frame.decodingTs = 0;
    frame.presentationTs = 0;
    frame.index = 0;

    G_ipcData->cloud.login_state = 1; // login success, send frames
    ffstream_seek_Iframe(&gs_kvsc_video_stream, -1);

    while (gs_brun) {
        frame.index = frameIndex;
        frame.flags = FRAME_FLAG_KEY_FRAME;

        result = ffkvsc_read(&frame, &gs_kvsc_video_stream);
        if (result == STATUS_READ_FILE_FAILED) {
            continue;
        } else if (result != STATUS_SUCCESS) {
            kvs_err("ffkvsc_read result != STATUS_SUCCESS!!!");
            goto CleanUp;
        }

        status = putKinesisVideoFrame(g_data.streamHandle, &frame);
        if (STATUS_FAILED(status)) {
            kvs_wrn("putKinesisVideoFrame failed with 0x%08x\n", status);
            status = STATUS_SUCCESS;
            continue;
        } else {
            kvs_inf("putKinesisVideoFrame success.");
        }
        ATOMIC_STORE_BOOL(&g_data.firstVideoFramePut, TRUE);

        frame.presentationTs += SAMPLE_VIDEO_FRAME_DURATION;
        frame.decodingTs = frame.presentationTs;
        frameIndex++;
    }
    CHK_STATUS(stopKinesisVideoStreamSync(g_data.streamHandle));
    CHK_STATUS(freeKinesisVideoStream(&(g_data.streamHandle)));
    CHK_STATUS(freeKinesisVideoClient(&(g_data.clientHandle)));

CleanUp:
    G_ipcData->cloud.login_state = 0;
    if (retStatus != STATUS_SUCCESS) {
        kvs_wrn("CleanUp PutVideoFrameThread failed with 0x%08x", retStatus);
    }
    return (PVOID) (uintptr_t) retStatus;
}

PVOID PutAudioFrameThread(PVOID args)
{
    ffss_thread_set_name("KVS-Audio");
    STATUS retStatus = STATUS_SUCCESS;
    Frame frame;
    STATUS status;
    int result;

    if (ffstream_open(&gs_kvsc_audio_stream, SHM_AUDIO_CAPTURE_KEY) != 0) {
        kvs_err("audio stream open failed!!!");
        retStatus = STATUS_OPEN_FILE_FAILED;
        goto CleanUp;
    }

    frame.version = FRAME_CURRENT_VERSION;
    frame.trackId = DEFAULT_AUDIO_TRACK_ID;
    frame.flags = FRAME_FLAG_NONE;
    frame.duration = 0;
    frame.decodingTs = 0; // relative time mode
    frame.presentationTs = 0; // relative time mode
    frame.index = 0;

    while (gs_brun) {
        result = ffkvsc_read(&frame, &gs_kvsc_audio_stream);
        if (result == STATUS_READ_FILE_FAILED) {
            continue;
        } else if (result != STATUS_SUCCESS) {
            goto CleanUp;
        }

        // no audio can be put until the first video frame is put
        if (ATOMIC_LOAD_BOOL(&g_data.firstVideoFramePut)) {
            frame.flags = FRAME_FLAG_NONE;
            status = putKinesisVideoFrame(g_data.streamHandle, &frame);
            if (STATUS_FAILED(status)) {
                kvs_wrn("audio putKinesisVideoFrame failed with 0x%08x\n", status);
                status = STATUS_SUCCESS;
            } else {
                kvs_inf("audio putKinesisVideoFrame success.");
            }
            frame.presentationTs += SAMPLE_AUDIO_FRAME_DURATION;
            frame.decodingTs = frame.presentationTs;
            frame.index++;
        }
    }

CleanUp:
    if (retStatus != STATUS_SUCCESS) {
        kvs_wrn("PutAudioFrameThread failed with 0x%08x", retStatus);
    }
    return (PVOID) (uintptr_t) retStatus;
}

static int load_kvsc(void)
{
    PDeviceInfo pDeviceInfo = NULL;
    PStreamInfo pStreamInfo = NULL;
    PClientCallbacks pClientCallbacks = NULL;
    PStreamCallbacks pStreamCallbacks = NULL;
    CLIENT_HANDLE clientHandle = INVALID_CLIENT_HANDLE_VALUE;
    STREAM_HANDLE streamHandle = INVALID_STREAM_HANDLE_VALUE;
    STATUS retStatus = STATUS_SUCCESS;
    PCHAR accessKey = NULL, secretKey = NULL, sessionToken = NULL;
    PCHAR streamName = NULL, region = NULL, cacertPath = NULL;
    PTrackInfo pAudioTrack = NULL;
    BYTE audioCpd[KVS_AAC_CPD_SIZE_BYTE];
    UINT64 streamStopTime, streamingDuration = DEFAULT_STREAM_DURATION;

    memset(&g_data, 0, sizeof(SampleCustomData));

    G_ipcData->cloud.login_state = 0; // default: not logged in

    accessKey    = AWS_ACESS_KEY_ID;
    secretKey    = AWS_SECRET_ACESS_KEY;
    cacertPath   = "/etc/ssl/SFSRootCAG2.pem";
    sessionToken = NULL;
    streamName   = STREAM_NAME;
    region       = (PCHAR) DEFAULT_AWS_REGION;

    streamStopTime = defaultGetTime() + streamingDuration;

    // create device info
    CHK_STATUS(createDefaultDeviceInfo(&pDeviceInfo));
    pDeviceInfo->clientInfo.loggerLogLevel = LOG_LEVEL_DEBUG;
    pDeviceInfo->storageInfo.storageSize = DEFAULT_STORAGE_SIZE;

    // create stream info
    CHK_STATUS(createRealtimeAudioVideoStreamInfoProvider(streamName, DEFAULT_RETENTION_PERIOD, DEFAULT_BUFFER_DURATION, &pStreamInfo));

    // set up audio cpd
    pAudioTrack = pStreamInfo->streamCaps.trackInfoList[0].trackId == DEFAULT_AUDIO_TRACK_ID ?
                  &pStreamInfo->streamCaps.trackInfoList[0] :
                  &pStreamInfo->streamCaps.trackInfoList[1];

    // generate audio cpd
    pAudioTrack->codecPrivateData = audioCpd;
    pAudioTrack->codecPrivateDataSize = KVS_AAC_CPD_SIZE_BYTE;
    CHK_STATUS(mkvgenGenerateAacCpd(AAC_LC, AUDIO_TRACK_SAMPLING_RATE, AUDIO_TRACK_CHANNEL_CONFIG, pAudioTrack->codecPrivateData, pAudioTrack->codecPrivateDataSize));

    // use relative time mode; buffer timestamps start from 0
    pStreamInfo->streamCaps.absoluteFragmentTimes = FALSE;

    // adjust members of pStreamInfo here if needed
    CHK_STATUS(createDefaultCallbacksProviderWithAwsCredentials(accessKey,
                                                                secretKey,
                                                                sessionToken,
                                                                MAX_UINT64,
                                                                region,
                                                                cacertPath,
                                                                NULL,
                                                                NULL,
                                                                &pClientCallbacks));

    if (NULL != getenv(ENABLE_FILE_LOGGING)) {
        if ((retStatus = addFileLoggerPlatformCallbacksProvider(pClientCallbacks,
                                                                FILE_LOGGING_BUFFER_SIZE,
                                                                MAX_NUMBER_OF_LOG_FILES,
                                                                (PCHAR) FILE_LOGGER_LOG_FILE_DIRECTORY_PATH,
                                                                TRUE)) != STATUS_SUCCESS) {
            kvs_err("File logging enable option failed with 0x%08x error code\n", retStatus);
        }
    }

    CHK_STATUS(createStreamCallbacks(&pStreamCallbacks));
    CHK_STATUS(addStreamCallbacks(pClientCallbacks, pStreamCallbacks));

    CHK_STATUS(createKinesisVideoClient(pDeviceInfo, pClientCallbacks, &clientHandle));
    CHK_STATUS(createKinesisVideoStreamSync(clientHandle, pStreamInfo, &streamHandle));

    g_data.streamStopTime = streamStopTime;
    g_data.streamHandle = streamHandle;
    g_data.clientHandle = clientHandle;
    g_data.streamStartTime = defaultGetTime();
    ATOMIC_STORE_BOOL(&g_data.firstVideoFramePut, FALSE);
    gs_brun = true;

    if (ffss_thread_create_join(&gs_kvsc_video_task, PutVideoFrameThread, NULL) != FFSS_OK) {
        kvs_err("ffss_thread_create_join PutVideoFrameThread failed!!!");
    }
    if (ffss_thread_create_join(&gs_kvsc_audio_task, PutAudioFrameThread, NULL) != FFSS_OK) {
        kvs_err("ffss_thread_create_join PutAudioFrameThread failed!!!");
    }
    return 0;

CleanUp:
    G_ipcData->cloud.login_state = 0;
    if (STATUS_FAILED(retStatus)) {
        G_ipcData->cloud.login_state = 2;
        kvs_err("Failed with status 0x%08x\n", retStatus);
    }
    if (LIKELY(pDeviceInfo != NULL)) {
        freeDeviceInfo(&pDeviceInfo);
    }
    if (LIKELY(pStreamInfo != NULL)) {
        freeStreamInfoProvider(&pStreamInfo);
    }
    if (IS_VALID_STREAM_HANDLE(streamHandle)) {
        freeKinesisVideoStream(&streamHandle);
    }
    if (IS_VALID_CLIENT_HANDLE(clientHandle)) {
        freeKinesisVideoClient(&clientHandle);
    }
    if (LIKELY(pClientCallbacks != NULL)) {
        freeCallbacksProvider(&pClientCallbacks);
    }
    return 0;
}
```

MushMal commented 3 years ago

Why are you not using absolute timestamp mode? In relative mode, every session will start from 0: for example, if the SDK does token rotation or the connection drops, the new streaming session will have 0 as the first frame timestamp. Some applications are OK with that, but most use cases call for absolute timestamps.
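For illustration, a minimal sketch of what switching to absolute timestamps could look like; it reuses only `absoluteFragmentTimes`, `defaultGetTime()`, and the `Frame` fields that already appear in this thread, and is an assumption about intent rather than the SDK's prescribed pattern:

```c
// Sketch: opt into absolute timestamp mode instead of relative.
pStreamInfo->streamCaps.absoluteFragmentTimes = TRUE;

// Stamp each frame with the current wall-clock time (100 ns units)
// rather than a counter that starts at 0.
frame.presentationTs = defaultGetTime();
frame.decodingTs = frame.presentationTs;
```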

At any rate, taking a quick look at your application code, there really isn't anything wrong that I can spot immediately. However, when I looked at the picture of the log snippet, I can see the following:

- The streaming is 17 seconds behind - likely an issue with networking throughput, or rollback, as you have errors.
- There were 4 errors in the stream - not sure what they were, as there is no log attached.
- Your multi-track setup does not produce frames from all of the tracks, or the timelines are different. There is an error 0x52000085 returned from a put-frame call.

The 0x52000085 status code is STATUS_MAX_FRAME_TIMESTAMP_DELTA_BETWEEN_TRACKS_EXCEEDED, which is defined in

https://github.com/awslabs/amazon-kinesis-video-streams-pic/blob/master/src/client/include/com/amazonaws/kinesis/video/client/Include.h#L182

This is an indication that the frame order coordinator buffer has overflowed. This happens when the coordinator accumulates frames from one track until it can interleave them with frames from the other track so that the timestamps are monotonically increasing. That buffer is small - 65 frames by default. What's likely happening is that either your audio or video is not producing frames at a steady cadence, or your audio timestamps are on a different timeline than the video frames - for example, the audio is using a clock that starts from 0 whereas the video encoder is using the wall clock. The GStreamer plugin attempts to compensate for all these issues. You might need to compensate for your timeline drift to ensure the audio and video frames are on the same timeline so the frame order coordinator can properly reorder/interleave them.
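As a sketch of that idea (the `videoIndex`/`audioIndex` counters are hypothetical; only `defaultGetTime()` and the duration macros come from the code above), both tracks can derive their timestamps from one shared epoch:

```c
// Hypothetical sketch: one shared epoch keeps both tracks on one timeline.
UINT64 streamEpoch = defaultGetTime(); // common start time, 100 ns units

// Video: frame number videoIndex at 25 fps
video_frame.presentationTs = streamEpoch + videoIndex * SAMPLE_VIDEO_FRAME_DURATION;
video_frame.decodingTs = video_frame.presentationTs;

// Audio: frame number audioIndex, 128 ms per AAC frame at 8 kHz
audio_frame.presentationTs = streamEpoch + audioIndex * SAMPLE_AUDIO_FRAME_DURATION;
audio_frame.decodingTs = audio_frame.presentationTs;
```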

This said, I am not yet sure whether this is the root cause of your decoding issue. There are too many unknowns.

Try to capture longer logs. Analyze your logs and see what's going on. Dump the timestamps and ensure they are on the same timeline. Ensure the encoder is producing both audio and video frames. Note that there cannot be more than 65 audio frames between two video frames. Of course, if your case requires it, you could enlarge the frame order coordinator's default 65-frame buffer at the expense of more memory, but needing to do so is usually an indication of a bad media pipeline.
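To make the 65-frame constraint concrete, here is a hypothetical self-check one could drop into the put-frame paths; the counter, threshold, and function names are illustrative, not an SDK API:

```c
// Hypothetical: count audio frames between consecutive video frames and
// warn before approaching the frame order coordinator's 65-frame default.
static UINT32 gAudioSinceLastVideo = 0;

static void onVideoFramePut(void)
{
    gAudioSinceLastVideo = 0; // a video frame resets the window
}

static void onAudioFramePut(void)
{
    if (++gAudioSinceLastVideo > 60) { // margin below the 65-frame default
        kvs_wrn("%u audio frames since the last video frame", gAudioSinceLastVideo);
    }
}
```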

Please update the issue with your findings. I hope these pointers help you debug your media pipeline in more detail.

yibin-lin commented 3 years ago

Thank you very much. I will modify the code according to your suggestions first; if I have other questions, I will ask again. Thank you.


yibin-lin commented 3 years ago

Hi, if what I want to push is H.264 video and AAC audio with a sampling rate of 8 kHz, are the defines here correct?

```c
#define DEFAULT_RETENTION_PERIOD            2 * HUNDREDS_OF_NANOS_IN_AN_HOUR
#define DEFAULT_BUFFER_DURATION             120 * HUNDREDS_OF_NANOS_IN_A_SECOND
#define DEFAULT_CALLBACK_CHAIN_COUNT        5
#define DEFAULT_KEY_FRAME_INTERVAL          45
#define DEFAULT_FPS_VALUE                   25
#define DEFAULT_STREAM_DURATION             20 * HUNDREDS_OF_NANOS_IN_A_SECOND
#define DEFAULT_STORAGE_SIZE                16 * 1024 * 1024
#define RECORDED_FRAME_AVG_BITRATE_BIT_PS   3800000
#define SAMPLE_AUDIO_FRAME_DURATION         (/*20*/ 128 * HUNDREDS_OF_NANOS_IN_A_MILLISECOND) // 1024/8000
#define SAMPLE_VIDEO_FRAME_DURATION         (HUNDREDS_OF_NANOS_IN_A_SECOND / DEFAULT_FPS_VALUE)
#define AUDIO_TRACK_SAMPLING_RATE           /*48000*/ 8000
#define AUDIO_TRACK_CHANNEL_CONFIG          2
```
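For what it's worth, the audio timing in these defines is internally consistent for 8 kHz AAC-LC: one AAC frame carries 1024 PCM samples, so at 8000 Hz a frame lasts 1024 / 8000 = 0.128 s = 128 ms, which matches SAMPLE_AUDIO_FRAME_DURATION. A tiny standalone check of that arithmetic (the macro values are the SDK's standard 100 ns units; everything else here is illustrative):

```c
#include <assert.h>

// 100 ns units, as used by the producer SDK's time macros.
#define HUNDREDS_OF_NANOS_IN_A_SECOND       10000000ULL
#define HUNDREDS_OF_NANOS_IN_A_MILLISECOND  10000ULL

int main(void)
{
    unsigned long long samplesPerFrame = 1024; // AAC-LC frame size
    unsigned long long sampleRate = 8000;      // AUDIO_TRACK_SAMPLING_RATE
    unsigned long long frameDuration100ns =
        samplesPerFrame * HUNDREDS_OF_NANOS_IN_A_SECOND / sampleRate;

    // 1024 / 8000 s = 128 ms = 1,280,000 hundred-nanosecond ticks
    assert(frameDuration100ns == 128 * HUNDREDS_OF_NANOS_IN_A_MILLISECOND);
    return 0;
}
```

Whether channel config 2 (stereo) matches the actual capture is worth double-checking against the encoder settings; an IPC camera microphone is often mono (channel config 1).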


yibin-lin commented 3 years ago

Hi, I modified my code according to your suggestion and used absolute timestamps. I printed the video and audio timestamps as shown in the figures below; from them, the video and audio appear to be on the same timeline. However, as the log below shows, there is still an error, and I don't know what it is.

[screenshots: printed video and audio timestamps, and the error log]

Below is my modified code:

```c
STATUS ffkvsc_read(PFrame pFrame, ffstream_t* pHndl)
{
    frame_info_t* vinfo = NULL;
    STATUS retStatus = STATUS_SUCCESS;

    CHK(pFrame != NULL, STATUS_NULL_ARG);
    CHK(pHndl != NULL, STATUS_NULL_ARG);

    // Get the size and read into frame
    if (ffstream_read(pHndl, &vinfo) != 0) {
        return STATUS_READ_FILE_FAILED;
    }
    pFrame->size = vinfo->u32FrmSize;
    pFrame->frameData = (PBYTE) FFSS_DATA_NEXT(vinfo);
    if (UNLIKELY(vinfo->stVideo.bIframe)) {
        pFrame->flags = FRAME_FLAG_KEY_FRAME;
        // kvs_inf("get iframe.");
    } else {
        pFrame->flags = FRAME_FLAG_NONE;
    }
    if (pFrame->flags == FRAME_FLAG_KEY_FRAME) {
        // kvs_inf("Key frame size %" PRIu64, pFrame->size);
    }

CleanUp:
    return retStatus;
}

PVOID PutMediaFrameThread(PVOID args)
{
    ffss_thread_set_name("KVS-Media");
    STATUS retStatus = STATUS_SUCCESS;
    Frame video_frame, audio_frame;
    STATUS status;
    UINT32 video_frameIndex = 0, audio_frameIndex = 0;
    int result;

    if (ffstream_open(&gs_kvsc_video_stream, SHM_STREAM_1_KEY) != 0) {
        retStatus = STATUS_OPEN_FILE_FAILED;
        goto CleanUp;
    }
    if (ffstream_open(&gs_kvsc_audio_stream, SHM_AUDIO_CAPTURE_KEY) != 0) {
        // kvs_err("audio stream open failed!!!");
        retStatus = STATUS_OPEN_FILE_FAILED;
        goto CleanUp;
    }

    video_frame.version = FRAME_CURRENT_VERSION;
    video_frame.trackId = DEFAULT_VIDEO_TRACK_ID;
    video_frame.duration = 0;
    video_frame.decodingTs = 0;
    video_frame.presentationTs = 0;
    video_frame.index = 0;

    audio_frame.version = FRAME_CURRENT_VERSION;
    audio_frame.trackId = DEFAULT_AUDIO_TRACK_ID;
    audio_frame.flags = FRAME_FLAG_NONE;
    audio_frame.duration = 0;
    audio_frame.decodingTs = 0;
    audio_frame.presentationTs = 0;
    audio_frame.index = 0;

    G_ipcData->cloud.login_state = 1; // login success, send frames
    ffstream_seek_Iframe(&gs_kvsc_video_stream, -1);
    while (gs_brun) {
        video_frame.flags = FRAME_FLAG_KEY_FRAME;
        result = ffkvsc_read(&video_frame, &gs_kvsc_video_stream);
        if (result == STATUS_READ_FILE_FAILED) {
            continue;
        } else if (result != STATUS_SUCCESS) {
            goto CleanUp;
        }
        kvs_inf("video frame.presentationTs:%lld", video_frame.presentationTs);
        status = putKinesisVideoFrame(g_data.streamHandle, &video_frame);
        if (STATUS_FAILED(status)) {
            status = STATUS_SUCCESS;
            continue;
        }
        ATOMIC_STORE_BOOL(&g_data.firstVideoFramePut, TRUE);
        video_frame.index++;

        // audio
        if (g_data.firstVideoFramePut) {
            result = ffkvsc_read(&audio_frame, &gs_kvsc_audio_stream);
            if (result == STATUS_READ_FILE_FAILED) {
                continue;
            } else if (result != STATUS_SUCCESS) {
                goto CleanUp;
            }

            audio_frame.flags = FRAME_FLAG_NONE;
            kvs_inf("audio frame.presentationTs:%lld", audio_frame.presentationTs);
            status = putKinesisVideoFrame(g_data.streamHandle, &audio_frame);
            if (STATUS_FAILED(status)) {
                status = STATUS_SUCCESS;
            }
            audio_frame.index++;
        }
    }

    CHK_STATUS(stopKinesisVideoStreamSync(g_data.streamHandle));
    CHK_STATUS(freeKinesisVideoStream(&(g_data.streamHandle)));
    CHK_STATUS(freeKinesisVideoClient(&(g_data.clientHandle)));

CleanUp:
    G_ipcData->cloud.login_state = 0;
    if (retStatus != STATUS_SUCCESS) {
        kvs_wrn("CleanUp PutMediaFrameThread failed with 0x%08x", retStatus);
    }
    return (PVOID) (uintptr_t) retStatus;
}
```

MushMal commented 3 years ago

@yibin-lin not sure what your ask is. You are explicitly not including the logs so I can't tell you what error you are seeing.

Try to run it and look through your logs. From what I see, you are able to stream out the data once you have the frames on the same timeline. The buffer duration in your picture is about 10 seconds, so ensure you have adequate bandwidth. Perhaps the error count reflects buffer pressure and dropped connections; I see you have multiple sessions.

Please resolve this issue, as your original question has been answered. Cut a new issue with a DETAILED description of the problem and the logs attached to get more help.

disa6302 commented 3 years ago

Closing as the original question is answered.