Open GiBeom-Song opened 4 years ago
Here is my test code to utilize jetson-ffmpeg.
```cpp
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
#include <cassert>
#include <iostream>
#include <sys/time.h>
#include <fstream>
#include <chrono>
#include <ctime>
#include <cstdio>
#include <cstdlib>
#include <thread>
#include <mutex>
#include <sys/socket.h>
#include <netinet/in.h>
#include <string.h>
#include <dirent.h>
#include <FreeImage.h>
#include <vector>

extern "C" {
#include <nvmpi.h>
#include "jetson_ffmpeg/avcodec.h"
#include "jetson_ffmpeg/decode.h"
#include "jetson_ffmpeg/internal.h"
#include <libavcodec/avcodec.h>
#include <libavutil/buffer.h>
#include <libavutil/common.h>
#include <libavutil/frame.h>
#include <libavformat/avformat.h>
#include <libavutil/pixdesc.h>
#include <libavutil/hwcontext.h>
#include <libavutil/hwcontext_drm.h>
#include <libavutil/opt.h>
#include <libavutil/avassert.h>
#include <libavutil/imgutils.h>
#include <libavutil/log.h>
}

using namespace std;

static AVBufferRef *hw_device_ctx = NULL;
enum AVHWDeviceType type;
static enum AVPixelFormat hw_pix_fmt;

typedef struct {
    char eos_reached;
    nvmpictx *ctx;
    AVClass *av_class;
} nvmpiDecodeContext;

static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType type)
{
    int err = 0;
    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type, NULL, NULL, 0)) < 0) {
        fprintf(stderr, "Failed to create specified HW device.\n");
        return err;
    }
    ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
    return err;
}

static enum AVPixelFormat get_hw_format(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts)
{
    const enum AVPixelFormat *p;
    for (p = pix_fmts; *p != -1; p++) {
        if (*p == hw_pix_fmt)
            return *p;
    }
    fprintf(stderr, "Failed to get HW surface format.\n");
    return AV_PIX_FMT_NONE;
}

static nvCodingType nvmpi_get_codingtype(AVCodecContext *avctx)
{
    switch (avctx->codec_id) {
    case AV_CODEC_ID_H264:       return NV_VIDEO_CodingH264;
    case AV_CODEC_ID_HEVC:       return NV_VIDEO_CodingHEVC;
    case AV_CODEC_ID_VP8:        return NV_VIDEO_CodingVP8;
    case AV_CODEC_ID_VP9:        return NV_VIDEO_CodingVP9;
    case AV_CODEC_ID_MPEG4:      return NV_VIDEO_CodingMPEG4;
    case AV_CODEC_ID_MPEG2VIDEO: return NV_VIDEO_CodingMPEG2;
    default:                     return NV_VIDEO_CodingUnused;
    }
}

static int nvmpi_init_decoder(AVCodecContext *avctx)
{
    int ret = 0;
    nvmpiDecodeContext *nvmpi_context = (nvmpiDecodeContext *)avctx->priv_data;
    nvCodingType codectype = NV_VIDEO_CodingUnused;

    codectype = nvmpi_get_codingtype(avctx); // NV_VIDEO_CodingH264
    if (codectype == NV_VIDEO_CodingUnused) {
        av_log(avctx, AV_LOG_ERROR, "Unknown codec type (%d).\n", avctx->codec_id);
        ret = AVERROR_UNKNOWN;
        return ret;
    }

    nvmpi_context->ctx = nvmpi_create_decoder(codectype, NV_PIX_YUV420);
    if (!nvmpi_context->ctx) {
        av_log(avctx, AV_LOG_ERROR, "Failed to nvmpi_create_decoder (code = %d).\n", ret);
        ret = AVERROR_UNKNOWN;
        return ret;
    }
    return ret;
}

static int nvmpi_close(AVCodecContext *avctx)
{
    nvmpiDecodeContext *nvmpi_context = (nvmpiDecodeContext *)avctx->priv_data;
    return nvmpi_decoder_close(nvmpi_context->ctx);
}

static int nvmpi_decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    //nvmpiDecodeContext *nvmpi_context = avctx->priv_data;
    nvmpiDecodeContext *nvmpi_context = (nvmpiDecodeContext *)avctx->priv_data;
    AVFrame *frame = (AVFrame *)data;
    nvFrame _nvframe = {0};
    nvPacket packet;
    uint8_t *ptrs[3];
    int res, linesize[3];

    if (avpkt->size) {
        packet.payload_size = avpkt->size;
        packet.payload = avpkt->data;
        packet.pts = avpkt->pts;

        res = nvmpi_decoder_put_packet(nvmpi_context->ctx, &packet);
        cout << "RES TEST:" << res << endl;
    }

    res = nvmpi_decoder_get_frame(nvmpi_context->ctx, &_nvframe, avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
    cout << "decode RES:" << res << endl;

    if (res < 0)
        return avpkt->size;

    //if (ff_get_buffer(avctx, frame, 0) < 0) {
    //    return AVERROR(ENOMEM);
    //}
    int ret;
    ret = avcodec_receive_frame(avctx, frame);
    cout << "TEST" << ret << endl;
    //return AVERROR(ENOMEM);

    linesize[0] = _nvframe.linesize[0];
    linesize[1] = _nvframe.linesize[1];
    linesize[2] = _nvframe.linesize[2];

    cout << "TEST" << endl;
    cout << linesize[0] << endl;
    cout << linesize[1] << endl;
    cout << linesize[2] << endl;

    ptrs[0] = _nvframe.payload[0];
    ptrs[1] = _nvframe.payload[1];
    ptrs[2] = _nvframe.payload[2];

    av_image_copy(frame->data, frame->linesize, (const uint8_t **)ptrs, linesize,
                  avctx->pix_fmt, _nvframe.width, _nvframe.height);

    frame->width = _nvframe.width;
    frame->height = _nvframe.height;
    frame->format = AV_PIX_FMT_YUV420P;
    frame->pts = _nvframe.timestamp;
    frame->pkt_dts = AV_NOPTS_VALUE;

    avctx->coded_width = _nvframe.width;
    avctx->coded_height = _nvframe.height;
    avctx->width = _nvframe.width;
    avctx->height = _nvframe.height;

    *got_frame = 1;

    return avpkt->size;
}

int main()
{
    AVFormatContext *input_ctx = NULL;
    AVInputFormat *input_fmt;
    int video_stream, ret;
    AVStream *video = NULL;
    AVCodecContext *decoder_ctx = NULL;
    AVCodec *decoder = NULL;
    AVPacket packet;
    double fps;

    // Jetson nano testing
    int ret2;
    nvmpictx *ctx_nvmpi = NULL;
    nvPacket packet_nvmpi;
    int got_frame;

    string path = "sample_convert5.h264";
    //string path = "/usr/src/jetson_multimedia_api/data/Video/sample_outdoor_car_1080p_10fps.h264";

    input_fmt = av_find_input_format("h264_nvmpi");

    //if (avformat_open_input(&input_ctx, path.c_str(), input_fmt, NULL) != 0){
    if (avformat_open_input(&input_ctx, path.c_str(), NULL, NULL) != 0) {
        avformat_close_input(&input_ctx);
        cout << "FORMAT OPEN ERROR" << endl;
        return -1;
    }

    if (avformat_find_stream_info(input_ctx, NULL) < 0) {
        fprintf(stderr, "Cannot find input stream information.\n");
        return -1;
    }

    /* find the video stream information */
    ret = av_find_best_stream(input_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0);
    if (ret < 0) {
        fprintf(stderr, "Cannot find a video stream in the input file\n");
        return -1;
    }
    video_stream = ret;

    //decoder_ctx = input_ctx->streams[video_stream]->codec;
    decoder = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
        return AVERROR(ENOMEM);

    ret = avcodec_open2(decoder_ctx, decoder, NULL);
    cout << "DECODEROPEN:" << ret << endl;

    ret = nvmpi_init_decoder(decoder_ctx);
    cout << ret << endl;
    if (ret == AVERROR_UNKNOWN)
        cout << "NVMPI_INIT_ERROR" << endl;

    AVFrame *frame;

    for (int i = 0; i < 5; i++) {
        cout << "PRINT TEST" << endl;
        if ((ret = av_read_frame(input_ctx, &packet)) < 0) {
            cout << "[DEBUG] FRAME_READ_FAILED" << endl;
            packet.data = NULL;
            packet.size = 0;
            av_packet_unref(&packet);
            return -1;
        }
        //AVFrame *frame = NULL;
        ret = nvmpi_decode(decoder_ctx, frame, &got_frame, &packet);
        //ret = nvmpi_decode(decoder_ctx, packet.data, &got_frame, &packet);
        cout << "decode ret:" << ret << endl;
        cout << "frame:" << frame << endl;
    }

    cout << "TEST" << endl;
    nvmpi_close(decoder_ctx);
    cout << "TEST" << endl;
    return 0;
}
```
The result of the test code looks like this:
How can I use jetson-ffmpeg to decode frames in C++? Also, ff_get_buffer causes a linking error (undefined reference to ff_get_buffer).
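For reference, what I am ultimately aiming for is an ordinary decode loop that goes only through the public FFmpeg API, something like the sketch below. I suspect ff_get_buffer is an FFmpeg-internal symbol that is not exported from the shared libraries, which is why the sketch avoids it. This assumes the jetson-ffmpeg patched build registers a decoder named "h264_nvmpi"; the input file name is just a placeholder:

```cpp
// Minimal decode sketch using only the public FFmpeg API (no ff_* internals,
// no direct nvmpi_* calls). Assumptions: the patched FFmpeg build exposes a
// decoder named "h264_nvmpi"; "sample.h264" is a placeholder input file.
#include <iostream>
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
}

int main() {
    const char *path = "sample.h264";   // placeholder input

    AVFormatContext *fmt = nullptr;
    if (avformat_open_input(&fmt, path, nullptr, nullptr) < 0) return -1;
    if (avformat_find_stream_info(fmt, nullptr) < 0) return -1;

    int vs = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, nullptr, 0);
    if (vs < 0) return -1;

    // Ask for the nvmpi-backed decoder by name; fall back to software h264.
    const AVCodec *dec = avcodec_find_decoder_by_name("h264_nvmpi");
    if (!dec) dec = avcodec_find_decoder(AV_CODEC_ID_H264);

    AVCodecContext *ctx = avcodec_alloc_context3(dec);
    avcodec_parameters_to_context(ctx, fmt->streams[vs]->codecpar);
    if (avcodec_open2(ctx, dec, nullptr) < 0) return -1;

    AVPacket *pkt = av_packet_alloc();
    AVFrame *frame = av_frame_alloc();

    while (av_read_frame(fmt, pkt) >= 0) {
        if (pkt->stream_index == vs) {
            avcodec_send_packet(ctx, pkt);
            // One packet can produce zero or more decoded frames.
            while (avcodec_receive_frame(ctx, frame) == 0) {
                std::cout << "decoded " << frame->width << "x" << frame->height
                          << " pts=" << frame->pts << std::endl;
                av_frame_unref(frame);
            }
        }
        av_packet_unref(pkt);
    }

    av_frame_free(&frame);
    av_packet_free(&pkt);
    avcodec_free_context(&ctx);
    avformat_close_input(&fmt);
    return 0;
}
```

Is this send_packet/receive_frame pattern the intended way to use jetson-ffmpeg, or is calling the nvmpi_* functions directly (as in my test code above) supported?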
@GiBeom-Song Is the problem solved?