'avcodec_decode_video' of ffmpeg doesn't work - visual-c++

I use VC++ Express and am getting started with FFmpeg, but I ran into trouble with my very first program.
During compilation, VC++ reports: 'identifier 'avcodec_decode_video': identifier not found'.
I don't know why. Here is what I coded:
#include "avcodec.h"
#include "avformat.h"
#include "swscale.h"
int main(int argc, char *argv[])
{
    av_register_all();
    AVFormatContext *pFormatCtx;
    // Open video file
    if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
        return -1; // Couldn't open file
    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0)
        return -1; // Couldn't find stream information
    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, argv[1], 0);
    int i;
    AVCodecContext *pCodecCtx;
    // Find the first video stream
    int videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
            videoStream=i;
            break;
        }
    if(videoStream==-1)
        return -1; // Didn't find a video stream
    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;
    AVCodec *pCodec;
    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec
    if(avcodec_open(pCodecCtx, pCodec)<0)
        return -1; // Could not open codec
    AVFrame *pFrame;
    // Allocate video frame
    pFrame=avcodec_alloc_frame();
    // Allocate an AVFrame structure
    AVFrame* pFrameRGB=avcodec_alloc_frame();
    if(pFrameRGB==NULL)
        return -1;
    uint8_t *buffer;
    int numBytes;
    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
    int frameFinished;
    AVPacket packet;
    i=0;
    while(av_read_frame(pFormatCtx, &packet)>=0) {
        if(packet.stream_index==videoStream) {
            // here makes compile error
            avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size);
            if(frameFinished) {
                img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24, (AVPicture*)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
            }
            av_free_packet(&packet);
        }
    }
    av_free(buffer);
    av_free(pFrameRGB);
    av_free(pFrame);
    avcodec_close(pCodecCtx);
    av_close_input_file(pFormatCtx);
    return 0;
}

Maybe it's simply because avcodec_decode_video was deprecated and replaced by avcodec_decode_video2 in FFmpeg?
http://cekirdek.pardus.org.tr/~ismail/ffmpeg-docs/deprecated.html
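If so, the fix would be to pass the whole AVPacket rather than a raw data pointer and size. A minimal sketch of the replacement call for the decode loop above, everything else unchanged:

avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

(The same deprecation list covers img_convert, which was removed in favor of sws_scale from libswscale.)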

Related

std::Future_error when using std::promise

I'm trying to make a video player. I have added a thread to time how long a frame should be shown on the screen. I decode the video and update the window in the main thread; the second thread gets the packets, works out how long each packet should be displayed, sends the packet to the main thread, and then waits for that time to elapse.
For some reason I get this error:
terminate called after throwing an instance of 'std::future_error'
what(): std::future_error: No associated state
What's causing the error?
My Code:
extern "C"{
//FFmpeg libraries
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
//SDL2 libraries
#include <SDL2/SDL.h>
}
// compatibility with newer API
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(55,28,1)
#define av_frame_alloc avcodec_alloc_frame
#define av_frame_free avcodec_free_frame
#endif
//C++ libraries
#include <memory>
#include <stdio.h>
#include <iostream>
#include <chrono>
#include <thread>
#include <mutex>
#include <condition_variable>
#include <future>
#include <atomic>
typedef struct PacketQueue {
AVPacketList *first_pkt, *last_pkt;
} PacketQueue;
std::atomic<bool> quitting;
std::mutex mutex;
std::condition_variable convar;
int packet_queue_put(PacketQueue *q, AVPacket *pkt){
AVPacketList *pkt1;
if(av_dup_packet(pkt) < 0){
return -1;
}
pkt1 = (AVPacketList*) av_malloc(sizeof(AVPacketList));
if(!pkt1){
return -1;
}
pkt1->pkt = *pkt;
pkt1->next = NULL;
std::lock_guard<std::mutex> lock(mutex);
if (!q->last_pkt){
q->first_pkt = pkt1;
}else{
q->last_pkt->next = pkt1;
}
q->last_pkt = pkt1;
convar.notify_all();
return 0;
}
static int packet_queue_get(PacketQueue *q, AVPacket *pkt){
AVPacketList *pkt1;
int ret;
std::unique_lock<std::mutex> lk(mutex);
while(1){
if(quitting){
ret = -1;
break;
}
pkt1 = q->first_pkt;
if(pkt1){
q->first_pkt = pkt1->next;
if(!q->first_pkt){
q->last_pkt = NULL;
}
*pkt = pkt1->pkt;
av_free(pkt1);
ret = 1;
break;
}else {
convar.wait_for(lk, std::chrono::milliseconds(1));
}
}
return ret;
}
void videoTimerFunc(AVRational time_base, PacketQueue* videoq, std::promise<AVPacket> prms){
AVPacket pkt;
int64_t last_pts = 0;
int64_t frameDelay;
AVRational microseconds = {1, 1000000};
while(!quitting){
// Getting packet and check if there are more packets
if(!packet_queue_get(videoq, &pkt)){
// Close programme
quitting = true;
}else {
// Send packet and create timer
frameDelay = av_rescale_q(pkt.dts, time_base, microseconds) - last_pts;
last_pts = av_rescale_q(pkt.dts, time_base, microseconds);
prms.set_value(pkt);
std::this_thread::sleep_for(std::chrono::microseconds(frameDelay));
}
}
}
int main(int argc, char *argv[]){
AVFormatContext* FormatCtx = nullptr;
AVCodecContext* CodecCtxOrig = nullptr;
AVCodecContext* CodecCtx = nullptr;
AVCodec* Codec = nullptr;
int videoStream;
AVFrame* Frame = nullptr;
AVPacket packet;
struct SwsContext* SwsCtx = nullptr;
PacketQueue videoq;
std::promise<AVPacket> pktprms;
std::future<AVPacket> pktftr = pktprms.get_future();
int frameFinished;
int64_t lastPTS;
SDL_Event event;
SDL_Window* screen;
SDL_Renderer* renderer;
SDL_Texture* texture;
std::shared_ptr<Uint8> yPlane, uPlane, vPlane;
int uvPitch;
if (argc != 2) {
fprintf(stderr, "Usage: %s <file>\n", argv[0]);
return -1;
}
// Register all formats and codecs
av_register_all();
// Initialise SDL2
if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
fprintf(stderr, "Couldn't initialise SDL - %s\n", SDL_GetError());
return -1;
}
// Setting things up
quitting = false;
memset(&videoq, 0, sizeof(PacketQueue));
// Open video file
if(avformat_open_input(&FormatCtx, argv[1], NULL, NULL) != 0){
fprintf(stderr, "Couldn't open file\n");
return -1; // Couldn't open file
}
// Retrieve stream information
if(avformat_find_stream_info(FormatCtx, NULL) < 0){
fprintf(stderr, "Couldn't find stream information\n");
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Couldn't find stream information
}
// Find the video stream
videoStream = av_find_best_stream(FormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
if(videoStream < 0){
fprintf(stderr, "Couldn't find video stream\n");
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Didn't find a video stream
}
// Get a pointer to the codec context for the video stream
CodecCtxOrig = FormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
Codec = avcodec_find_decoder(CodecCtxOrig->codec_id);
if(Codec == NULL){
fprintf(stderr, "Unsupported codec\n");
// Close the codec
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Codec not found
}
// Copy context
CodecCtx = avcodec_alloc_context3(Codec);
if(avcodec_copy_context(CodecCtx, CodecCtxOrig) != 0){
fprintf(stderr, "Couldn't copy codec context");
// Close the codec
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Error copying codec context
}
// Open codec
if(avcodec_open2(CodecCtx, Codec, NULL) < 0){
fprintf(stderr, "Couldn't open codec\n");
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1; // Could not open codec
}
// Allocate video frame
Frame = av_frame_alloc();
// Make a screen to put our video
screen = SDL_CreateWindow("Video Player", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, CodecCtx->width, CodecCtx->height, 0);
if(!screen){
fprintf(stderr, "SDL: could not create window - exiting\n");
quitting = true;
// Clean up SDL2
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
renderer = SDL_CreateRenderer(screen, -1, 0);
if(!renderer){
fprintf(stderr, "SDL: could not create renderer - exiting\n");
quitting = true;
// Clean up SDL2
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
// Allocate a place to put our YUV image on that screen
texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, CodecCtx->width, CodecCtx->height);
if(!texture){
fprintf(stderr, "SDL: could not create texture - exiting\n");
quitting = true;
// Clean up SDL2
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
// Initialise SWS context for software scaling
SwsCtx = sws_getContext(CodecCtx->width, CodecCtx->height, CodecCtx->pix_fmt,
CodecCtx->width, CodecCtx->height, PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);
if(!SwsCtx){
fprintf(stderr, "Couldn't create sws context\n");
quitting = true;
// Clean up SDL2
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
// set up YV12 pixel array (12 bits per pixel)
yPlane = std::shared_ptr<Uint8>((Uint8 *)::operator new (CodecCtx->width * CodecCtx->height, std::nothrow));
uPlane = std::shared_ptr<Uint8>((Uint8 *)::operator new (CodecCtx->width * CodecCtx->height / 4, std::nothrow));
vPlane = std::shared_ptr<Uint8>((Uint8 *)::operator new (CodecCtx->width * CodecCtx->height / 4, std::nothrow));
uvPitch = CodecCtx->width / 2;
if (!yPlane || !uPlane || !vPlane) {
fprintf(stderr, "Could not allocate pixel buffers - exiting\n");
quitting = true;
// Clean up SDL2
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return -1;
}
std::thread videoTimerThread(videoTimerFunc, FormatCtx->streams[videoStream]->time_base, &videoq, std::move(pktprms));
while (!quitting) {
// Check for more packets
if(av_read_frame(FormatCtx, &packet) >= 0){
// Check what stream it belongs to
if (packet.stream_index == videoStream) {
packet_queue_put(&videoq, &packet);
}else{
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
}
// Check if its time to update
if(pktftr.wait_for(std::chrono::milliseconds(1)) == std::future_status::ready){
// Getting packet
packet = pktftr.get();
// Decode video frame
avcodec_decode_video2(CodecCtx, Frame, &frameFinished, &packet);
// Did we get a video frame?
if (frameFinished) {
AVPicture pict;
pict.data[0] = yPlane.get();
pict.data[1] = uPlane.get();
pict.data[2] = vPlane.get();
pict.linesize[0] = CodecCtx->width;
pict.linesize[1] = uvPitch;
pict.linesize[2] = uvPitch;
// Convert the image into YUV format that SDL uses
sws_scale(SwsCtx, (uint8_t const * const *) Frame->data, Frame->linesize, 0, CodecCtx->height, pict.data, pict.linesize);
SDL_UpdateYUVTexture(texture, NULL, yPlane.get(), CodecCtx->width, uPlane.get(), uvPitch, vPlane.get(), uvPitch);
SDL_RenderClear(renderer);
SDL_RenderCopy(renderer, texture, NULL, NULL);
SDL_RenderPresent(renderer);
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
SDL_PollEvent(&event);
switch (event.type) {
case SDL_QUIT:
quitting = true;
break;
default:
break;
}
}
videoTimerThread.join();
//SDL2 clean up
SDL_DestroyTexture(texture);
SDL_DestroyRenderer(renderer);
SDL_DestroyWindow(screen);
SDL_Quit();
// Free the YUV frame
av_frame_free(&Frame);
// Free Sws
sws_freeContext(SwsCtx);
// Close the codec
avcodec_close(CodecCtx);
avcodec_close(CodecCtxOrig);
// Close the video file
avformat_close_input(&FormatCtx);
return 0;
}
You can't reuse a std::promise/std::future pair in a loop: a promise delivers a value only once, and after the first pktftr.get() the future no longer has a shared state, so the next wait_for()/get() throws std::future_error with "No associated state". Thanks to Igor Tandetnik for pointing that out in the comments.
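One way around it is a small reusable hand-rolled channel built on the mutex and condition variable the code already uses, instead of the one-shot promise/future. A sketch (the Slot name is illustrative, not from the post):

#include <chrono>
#include <condition_variable>
#include <mutex>

template <typename T>
class Slot {
    std::mutex m;
    std::condition_variable cv;
    T value;
    bool full = false;
public:
    // Producer side: replaces prms.set_value(pkt).
    void put(T v) {
        std::lock_guard<std::mutex> lock(m);
        value = std::move(v);
        full = true;
        cv.notify_one();
    }
    // Consumer side: replaces pktftr.wait_for(...) followed by pktftr.get().
    // Returns false if nothing arrived within the timeout.
    bool get_for(T& out, std::chrono::milliseconds timeout) {
        std::unique_lock<std::mutex> lock(m);
        if (!cv.wait_for(lock, timeout, [this]{ return full; }))
            return false;
        out = std::move(value);
        full = false;
        return true;
    }
};

The main loop then becomes if (pktslot.get_for(packet, std::chrono::milliseconds(1))) { ... } and, unlike a promise, the slot can be filled again for every frame.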

FFmpeg: How to use multithreading?

I want to decode H.264 with FFmpeg, but I found that the decode function only uses one CPU core (shown by the system monitor).
Environment: Ubuntu 14.04, FFmpeg 3.2.4, CPU i7-7500U.
So I searched for FFmpeg multithreading and decided to use all CPU cores for decoding.
I set up the AVCodecContext like this:
//Init works
//codecId=AV_CODEC_ID_H264;
avcodec_register_all();
pCodec = avcodec_find_decoder(codecId);
if (!pCodec)
{
printf("Codec not found\n");
return -1;
}
pCodecCtx = avcodec_alloc_context3(pCodec);
if (!pCodecCtx)
{
printf("Could not allocate video codec context\n");
return -1;
}
pCodecParserCtx=av_parser_init(codecId);
if (!pCodecParserCtx)
{
printf("Could not allocate video parser context\n");
return -1;
}
pCodecCtx->thread_count = 4;
pCodecCtx->thread_type = FF_THREAD_FRAME;
pCodec->capabilities &= CODEC_CAP_TRUNCATED;
pCodecCtx->flags |= CODEC_FLAG_TRUNCATED;
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
printf("Could not open codec\n");
return -1;
}
av_log_set_level(AV_LOG_QUIET);
av_init_packet(&packet);
//parse and decode
//after av_parser_parse2, the packet has a complete frame data
//in decode function, I just call avcodec_decode_video2 and do some frame copy work
while (cur_size>0)
{
int len = av_parser_parse2(
pCodecParserCtx, pCodecCtx,
&packet.data, &packet.size,
cur_ptr, cur_size,
AV_NOPTS_VALUE, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
cur_ptr += len;
cur_size -= len;
if(GetPacketSize()==0)
continue;
AVFrame *pFrame = av_frame_alloc();
int ret = Decode(pFrame);
if (ret < 0)
{
continue;
}
if (ret)
{
//some works
}
}
But nothing is different from before.
How can I use multithreading in FFmpeg? Any advice?
pCodec->capabilities &= CODEC_CAP_TRUNCATED;
And that's your bug. Please remove this line. The return value of avcodec_find_decoder() should for all practical intents and purposes be considered const.
Specifically, this statement removes the AV_CODEC_CAP_FRAME_THREADS flag from the codec's capabilities, thus effectively disabling frame-multithreading in the rest of the code.
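With that assignment removed, the threading request on the context is all that's needed; a sketch of the relevant part of the setup:

pCodecCtx->thread_count = 4; /* or 0 to let FFmpeg auto-detect */
pCodecCtx->thread_type = FF_THREAD_FRAME; /* request frame-level threading */
/* leave pCodec->capabilities alone */
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
    printf("Could not open codec\n");
    return -1;
}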

FFMPEG - How to save audio stream detached from video to file without transcoding

I tried to use fwrite() to save the audio stream, but the generated file cannot be opened.
I also tried av_frame_write() to write the packets, but that did not work either.
Please help me with this problem: how do I write an audio stream to a file without transcoding?
/* open the input file with generic avformat function */
err = avformat_open_input(input_format_context, filename, NULL, NULL);
if (err < 0) {
return err;
}
/* If not enough info to get the stream parameters, we decode the
first frames to get it. (used in mpeg case for example) */
ret = avformat_find_stream_info(*input_format_context, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
return ret;
}
/* dump the file content */
av_dump_format(*input_format_context, 0, filename, 0);
for (size_t i = 0; i < (*input_format_context)->nb_streams; i++) {
AVStream *st = (*input_format_context)->streams[i];
if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
FILE *file = NULL;
file = fopen("C:\\Users\\MyPC\\Downloads\\test.aac", "wb");
AVPacket reading_packet;
av_init_packet(&reading_packet);
while (av_read_frame(*input_format_context, &reading_packet) == 0) {
if (reading_packet.stream_index == (int) i) {
fwrite(reading_packet.data, 1, reading_packet.size, file);
}
av_free_packet(&reading_packet);
}
fclose(file);
return 0;
}
}
AAC files require each frame to carry an ADTS header. If the file you are reading from does not use ADTS framing (MP4, for example), you will need to create those headers manually or use a bitstream filter. Also, your code does not check whether the codec actually is AAC.
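For illustration, here is a minimal sketch of building that 7-byte header by hand before each packet. write_adts_header is a hypothetical helper, and the profile, sample-rate index, and channel count must be taken from the stream's codec parameters:

/* Builds the 7-byte ADTS header (no CRC) that must precede each raw AAC
   frame. profile is the AAC object type (2 = AAC-LC), sr_index the MPEG-4
   sampling frequency index, frame_len the packet size in bytes. */
static void write_adts_header(uint8_t hdr[7], int profile, int sr_index,
                              int channels, int frame_len)
{
    int full_len = frame_len + 7;          /* length includes this header */
    hdr[0] = 0xFF;                         /* syncword, high bits         */
    hdr[1] = 0xF1;                         /* syncword, MPEG-4, no CRC    */
    hdr[2] = ((profile - 1) << 6) | (sr_index << 2) | (channels >> 2);
    hdr[3] = ((channels & 3) << 6) | (full_len >> 11);
    hdr[4] = (full_len >> 3) & 0xFF;
    hdr[5] = ((full_len & 7) << 5) | 0x1F; /* + buffer fullness high bits */
    hdr[6] = 0xFC;                         /* fullness low bits, 1 block  */
}

Inside the read loop you would then write the header before the payload:

uint8_t hdr[7];
write_adts_header(hdr, 2, 4, 2, reading_packet.size); /* e.g. AAC-LC, 44.1 kHz, stereo */
fwrite(hdr, 1, 7, file);
fwrite(reading_packet.data, 1, reading_packet.size, file);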

Encoding FLOAT PCM to OGG using libav

I am currently trying to convert a raw PCM float buffer to an Ogg-encoded file. I tried several libraries to do the encoding and finally chose libavcodec.
What I precisely want to do is take the float buffer ([-1;1]) provided by my audio library and turn it into a char buffer of encoded Ogg data.
I managed to encode the float buffer to a buffer of encoded MP2 with this (proof-of-concept) code:
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <libavcodec/avcodec.h>
#include <libavutil/mem.h>
static AVCodec *codec;
static AVCodecContext *c;
static AVPacket pkt;
static uint16_t* samples;
static AVFrame* frame;
static int frameEncoded;
FILE *file;
int main(int argc, char *argv[])
{
file = fopen("file.ogg", "w+");
long ret;
avcodec_register_all();
codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
if (!codec) {
fprintf(stderr, "codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(NULL);
c->bit_rate = 256000;
c->sample_rate = 44100;
c->channels = 2;
c->sample_fmt = AV_SAMPLE_FMT_S16;
c->channel_layout = AV_CH_LAYOUT_STEREO;
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
/* frame containing input raw audio */
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
frame->nb_samples = c->frame_size;
frame->format = c->sample_fmt;
frame->channel_layout = c->channel_layout;
/* the codec gives us the frame size, in samples,
* we calculate the size of the samples buffer in bytes */
int buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
c->sample_fmt, 0);
if (buffer_size < 0) {
fprintf(stderr, "Could not get sample buffer size\n");
exit(1);
}
samples = av_malloc(buffer_size);
if (!samples) {
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
buffer_size);
exit(1);
}
/* setup the data pointers in the AVFrame */
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
(const uint8_t*)samples, buffer_size, 0);
if (ret < 0) {
fprintf(stderr, "Could not setup audio frame\n");
exit(1);
}
}
void myLibraryCallback(float *inbuffer, unsigned int length)
{
for(int j = 0; j < (2 * length); j++) {
if(frameEncoded >= (c->frame_size *2)) {
int avret, got_output;
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
avret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
if (avret < 0) {
fprintf(stderr, "Error encoding audio frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, file);
av_free_packet(&pkt);
}
frameEncoded = 0;
}
samples[frameEncoded] = inbuffer[j] * SHRT_MAX;
frameEncoded++;
}
}
The code is really simple: I initialize libavcodec the usual way, then my audio library sends me processed PCM float [-1;1] samples, interleaved, at 44.1 kHz, along with the number of floats per channel in inbuffer (usually 1024; 2 channels for stereo). So inbuffer usually contains 2048 floats.
That part was easy, since I only had to convert my PCM floats to interleaved S16, and an S16 sample can be written straight into the char buffer.
Now I would like to apply this to Ogg, which needs a sample format of AV_SAMPLE_FMT_FLTP.
Since my native format is AV_SAMPLE_FMT_FLT, it should only take some deinterleaving, which is easy to do (see the sketch after this question).
The points I don't get are:
How do you put a float buffer into a char buffer? Do we treat them as-is (float* floatSamples = (float*) samples)? If so, what does the sample count avcodec gives you mean: is it a number of floats or of chars?
How do you send data in two buffers (one for left, one for right) when avcodec_fill_audio_frame only takes a (uint8_t*) parameter and not a (uint8_t**) for multiple channels? Does it completely change the previous sample code?
I tried to find answers myself and have experimented a lot so far, but I failed on these points. Since documentation on this is scarce, I would be very grateful for answers.
Thank you !
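For what it's worth, here is a minimal sketch of the deinterleaving mentioned above, assuming the samples buffer was sized for AV_SAMPLE_FMT_FLTP with av_samples_get_buffer_size(). For planar formats, avcodec_fill_audio_frame() splits the one contiguous buffer into channel planes (plane i starting at buf + i * nb_samples * sizeof(float)), so for stereo it is enough to write the left samples into the first half and the right samples into the second half:

/* inbuffer holds interleaved L/R floats, as described in the question */
float *planes = (float *) samples; /* same contiguous buffer, viewed as floats */
int nb = c->frame_size;            /* samples per channel */
for (int i = 0; i < nb; i++) {
    planes[i]      = inbuffer[2 * i];     /* left plane  */
    planes[nb + i] = inbuffer[2 * i + 1]; /* right plane */
}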

app crashed at avformat_find_stream_info

I wrote a native method that simply gets the codec ID of a video, but my app never gets past the line 'avformat_find_stream_info(pFormatCtx, NULL)'.
Here is my native method:
int get_video_info(JNIEnv *env, jobject thiz, jstring strInFile){
AVFormatContext *pFmtCtx;
AVCodecContext *pCCtx;
AVCodec *pCd;
AVFrame *picture;
int i, videoStream = -1, error = 0;
const char *in_file_name = (*env)->GetStringUTFChars(env, strInFile, NULL);
av_register_all();
LOGI(1, "HERE 0 %s", in_file_name); // app passed here
/*Open video file*/
pFmtCtx = avformat_alloc_context();
if((error=avformat_open_input(&pFmtCtx, in_file_name, NULL, NULL)) < 0){
LOGE(1, "Couldn't open file, error-code: %d with file url: %s", error, in_file_name);
return -1;
}
LOGI(1, "HERE 1 Duration: %d", pFmtCtx->duration); //app passed here
/*Retrieve the stream information, APP CRASH RIGHT HERE*/
if(avformat_find_stream_info(pFormatCtx, NULL)<0){
LOGE(1, "Couldn't retrieve stream information");
avformat_free_context(pFmtCtx);
return -1; // Couldn’t find stream information
}
LOGI(1, "HERE 2");
//Find the first video stream
videoStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++) {
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
videoStream=i;
break;
}
}
if(videoStream==-1){
avformat_free_context(pFmtCtx);
LOGE(1, "Didn't find a video stream");
return -1; // Didn’t find a video stream
}
// Get a pointer to the codec context for the video stream
pCCtx=pFormatCtx->streams[videoStream]->codec;
avformat_free_context(pFmtCtx);
(*env)->ReleaseStringUTFChars(env, strInFile, in_file_name);
return pCCtx->codec_id;
}
Question: why do I always fail to find the stream info like this? Please help me fix it. Thanks.
You are using pFormatCtx (which was not initialized) instead of pFmtCtx in several places in your code!
You could set pFormatCtx = pFmtCtx after avformat_alloc_context():
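A sketch of that fix, declaring the missing variable as an alias right after allocation (alternatively, just rename every pFormatCtx to pFmtCtx):

pFmtCtx = avformat_alloc_context();
AVFormatContext *pFormatCtx = pFmtCtx; /* alias so the later references use the initialized context */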
