////////////////////////////////////////////////////////
//
// GEM - Graphics Environment for Multimedia
//
// zmoelnig@iem.at
//
// Implementation file
//
//    Copyright (c) 1997-1999 Mark Danks.
//    Copyright (c) Günther Geiger.
//    Copyright (c) 2001-2011 IOhannes m zmölnig. forum::für::umläute. IEM. zmoelnig@iem.at
//    For information on usage and redistribution, and for a DISCLAIMER OF ALL
//    WARRANTIES, see the file, "GEM.LICENSE.TERMS" in this distribution.
//
/////////////////////////////////////////////////////////
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif

#include <inttypes.h> // for PRId64

#include "filmFFMPEG.h"
#include "plugins/PluginFactory.h"
#include "Gem/RTE.h"
#include "Gem/Properties.h"

using namespace gem::plugins;

REGISTER_FILMFACTORY("ffmpeg", filmFFMPEG);

/////////////////////////////////////////////////////////
//
// filmFFMPEG
//
/////////////////////////////////////////////////////////
// Constructor
//
/////////////////////////////////////////////////////////
filmFFMPEG :: filmFFMPEG(void) :
  m_fps(20.),
  m_fps_num(1), m_fps_denum(1),
  m_numFrames(-1), m_numTracks(-1),
  m_codec(NULL), m_codecParam(NULL),
  sws_ctx(NULL)
{ }

/////////////////////////////////////////////////////////
// open the file
//
/////////////////////////////////////////////////////////
bool filmFFMPEG :: open(const std::string&filename,
                        const gem::Properties&wantProps)
{
  // define the destination pix_fmt
  double d;
  if(wantProps.get("colorspace", d) && d>0) {
    m_wantedFormat=d;
  }
  switch(m_wantedFormat) {
  case GEM_RGBA:
    dst_pix_fmt = AV_PIX_FMT_RGBA;
    break;
  case GEM_YUV:
    // GEM's YUV422 is the packed UYVY layout; a planar format like
    // AV_PIX_FMT_YUV422P would not match the single-plane m_image buffer
    dst_pix_fmt = AV_PIX_FMT_UYVY422;
    break;
  case GEM_GRAY:
    // GEM grayscale is 8bit/pixel, hence GRAY8 rather than the 10bit GRAY10
    dst_pix_fmt = AV_PIX_FMT_GRAY8;
    break;
  default:
    dst_pix_fmt = AV_PIX_FMT_RGBA;
    break;
  }

  close();

  // create libavformat context, find best video stream and get best codec
  m_formatContext = avformat_alloc_context();
  if (avformat_open_input(&m_formatContext, filename.c_str(), NULL, NULL) != 0) {
    error("could not open the file %s", filename.c_str());
    return false;
  }
  post("format %s", m_formatContext->iformat->name);
  post("streams %d", m_formatContext->nb_streams);
  m_numTracks = m_formatContext->nb_streams;
  if (avformat_find_stream_info(m_formatContext, NULL) < 0) {
    error("could not get the stream info");
    return false;
  }

  // find the video stream information
  int ret = av_find_best_stream(m_formatContext, AVMEDIA_TYPE_VIDEO, -1, -1, &m_codec, 0);
  if (ret < 0) {
    error("cannot find a video stream in the input file");
    return false;
  }
  video_stream = ret;
  m_in_stream = m_formatContext->streams[video_stream];
  m_codecParam = m_in_stream->codecpar;
  verbose(0, "AVStream->start_time %" PRId64, m_in_stream->start_time);

  m_codec = avcodec_find_decoder(m_codecParam->codec_id);
  if (m_codec==NULL) {
    error("unsupported codec!");
    return false;
  }
  // print the codec name
  post("FFMPEG: \tCodec %s", m_codec->name);

  m_codecContext = avcodec_alloc_context3(m_codec);
  if (avcodec_parameters_to_context(m_codecContext, m_codecParam) < 0) {
    error("failed to copy codec params to codec context");
    return false;
  }
  if (avcodec_open2(m_codecContext, m_codec, NULL) < 0) {
    error("failed to open codec through avcodec_open2");
    return false;
  }

  // now we can define the video properties
  m_fps_num = m_in_stream->r_frame_rate.num;
  m_fps_denum = m_in_stream->r_frame_rate.den;
  m_fps = (float)m_fps_num / (float)m_fps_denum;
  m_numFrames = m_in_stream->nb_frames;
  m_width = m_codecContext->width;
  m_height = m_codecContext->height;
  src_pix_fmt = m_codecContext->pix_fmt;

  // set up the output image
  m_image.newfilm=true;
  m_image.image.notowned=true;
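  // 'notowned' matters here: getFrame() later points m_image.image.data
  // straight into m_finalFrame's buffer (no copy), so GEM must not try to
  // free or reallocate that memory itself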
  m_image.image.xsize=m_width;
  m_image.image.ysize=m_height;
  m_image.image.setCsizeByFormat(m_wantedFormat);

  const char* colorSpace = av_get_pix_fmt_name(m_codecContext->pix_fmt);
  post("FFMPEG: codec_type %d, color space %s ( %d )",
       m_codecParam->codec_type, colorSpace, m_codecContext->pix_fmt);

  m_avframe = av_frame_alloc();
  m_finalFrame = av_frame_alloc();
  m_packet = av_packet_alloc();

  // determine the required buffer size and allocate the buffer for the
  // converted frame (avpicture_get_size/avpicture_fill are deprecated;
  // av_image_get_buffer_size/av_image_fill_arrays replace them in FFmpeg>=5)
  uint8_t *buffer = NULL;
  int numBytes = avpicture_get_size(dst_pix_fmt, m_width, m_height);
  buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
  avpicture_fill((AVPicture *)m_finalFrame, buffer, dst_pix_fmt, m_width, m_height);

  // create the scaling/pixel-format conversion context
  sws_ctx = sws_getContext(m_width, m_height, src_pix_fmt,
                           m_width, m_height, dst_pix_fmt,
                           SWS_BILINEAR, NULL, NULL, NULL);
  if (!sws_ctx) {
    error("impossible to create scale context for the conversion "
          "fmt:%s s:%dx%d -> fmt:%s s:%dx%d",
          av_get_pix_fmt_name(src_pix_fmt), m_width, m_height,
          av_get_pix_fmt_name(dst_pix_fmt), m_width, m_height);
    ret = AVERROR(EINVAL);
    return false;
  }
  return true;
}

void filmFFMPEG::close(void)
{
  // FIXME: enabling these cleanup calls crashes, so for now the
  // allocated contexts, frames and packets simply leak
  // avformat_close_input(&m_formatContext);
  // sws_freeContext(sws_ctx);
  // avformat_free_context(m_formatContext);
  // av_packet_free(&m_packet);
  // av_frame_free(&m_avframe);
  // avcodec_free_context(&m_codecContext);
}

bool filmFFMPEG::isThreadable(void)
{
  return true;
}

/////////////////////////////////////////////////////////
// changeImage
//
/////////////////////////////////////////////////////////
film::errCode filmFFMPEG :: changeImage(int imgNum, int trackNum)
{
  // post("track: %d", trackNum);
  if(trackNum<0) {
    error("no track defined");
    // just automatically proceed to the next frame:
    // this might speed up things for linear decoding
    return film::FAILURE;
  }
  if(imgNum>=m_numFrames || imgNum<0) {
    return film::FAILURE;
  }
  // convert the frame number to a timestamp for seeking
  int64_t seekTarget = frameToPts(imgNum);
  // seek on the video stream (whose time base frameToPts() uses),
  // not on stream 0, which need not be the video stream
  av_seek_frame(m_formatContext, video_stream, seekTarget, AVSEEK_FLAG_ANY);
  return film::SUCCESS;
}

int64_t filmFFMPEG::frameToPts(int frame) const
{
  // frame -> seconds (divide by fps) -> units of the stream's time base
  return (int64_t(frame) * m_fps_denum * m_in_stream->time_base.den)
         / (int64_t(m_fps_num) * m_in_stream->time_base.num);
}

/////////////////////////////////////////////////////////
// render
//
/////////////////////////////////////////////////////////
pixBlock* filmFFMPEG :: getFrame()
{
  // post("rendering");
  int response = 0;
  int how_many_packets_to_process = 4;
  while (av_read_frame(m_formatContext, m_packet) >= 0) {
    bool isVideo = (m_packet->stream_index == video_stream);
    if (isVideo) {
      response = decode_packet(m_packet, m_codecContext, m_avframe);
    }
    // unref unconditionally, so the packet is released on every path
    av_packet_unref(m_packet);
    if (isVideo && (response < 0 || --how_many_packets_to_process <= 0)) {
      break;
    }
  }
  // av_packet_free(&m_packet);
  // av_frame_free(&m_avframe);
  m_image.newimage=true;
  return &m_image;
}

int filmFFMPEG :: decode_packet(AVPacket *m_packet, AVCodecContext *m_codecContext, AVFrame *m_avframe)
{
  // feed the compressed packet to the decoder
  int response = avcodec_send_packet(m_codecContext, m_packet);
  if (response < 0) {
    error("error while sending a packet to the decoder");
    return response;
  }
  while (response >= 0) {
    response = avcodec_receive_frame(m_codecContext, m_avframe);
    if (response == AVERROR(EAGAIN) || response == AVERROR_EOF) {
      // the decoder needs more input (or is drained): not an error
      break;
    } else if (response < 0) {
      error("error while receiving a frame from the decoder");
      return response;
    }
    // convert the decoded frame to the requested destination format
    sws_scale(sws_ctx, m_avframe->data, m_avframe->linesize, 0, m_height,
              m_finalFrame->data, m_finalFrame->linesize);
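    // sws_scale() writes rows using the linesize chosen by avpicture_fill(),
    // which packs rows tightly (align=1); that matches GEM's expectation of
    // a plain width*bytes-per-pixel pixel array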
    // verbose(0, "frame : %d", m_avframe->coded_picture_number);
    // hand the converted pixels over to GEM (no copy; see 'notowned' above)
    m_image.image.data = m_finalFrame->data[0];
    break;
  }
  return 0;
}

bool filmFFMPEG::enumProperties(gem::Properties&readprops,
                                gem::Properties&writeprops)
{
  readprops.clear();
  writeprops.clear();

  double value=0.;
  readprops.set("width", value);
  readprops.set("height", value);
  readprops.set("fps", value);
  readprops.set("frames", value);
  return true;
}

void filmFFMPEG::setProperties(gem::Properties&props)
{ }

void filmFFMPEG::getProperties(gem::Properties&props)
{
  std::vector<std::string> keys=props.keys();
  gem::any value;
  double d;
  unsigned int i=0;
  for(i=0; i<keys.size(); i++) {
    std::string key=keys[i];
    props.erase(key);
    if("fps"==key) {
      d=m_fps;
      value=d;
      props.set(key, value);
    }
    if("frames"==key && m_numFrames>=0) {
      d=m_numFrames;
      value=d;
      props.set(key, value);
    }
    if("tracks"==key && m_numTracks>=0) {
      d=m_numTracks;
      value=d;
      props.set(key, value);
    }
    if("width"==key) {
      d=m_image.image.xsize;
      value=d;
      props.set(key, value);
    }
    if("height"==key) {
      d=m_image.image.ysize;
      value=d;
      props.set(key, value);
    }
  }
}
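
// A minimal usage sketch (disabled): how a host could drive this decoder
// through its public interface, open() -> changeImage() -> getFrame().
// The movie path, frame count and track number below are placeholders,
// not values taken from this plugin.
#if 0
static void sketch_playback(void)
{
  filmFFMPEG decoder;
  gem::Properties props;
  double colorspace = GEM_RGBA;        // request RGBA output
  props.set("colorspace", colorspace);
  if(!decoder.open("/tmp/example.mov", props)) { // hypothetical file
    return;
  }
  for(int frame=0; frame<100; frame++) {
    decoder.changeImage(frame, 0);     // seek to 'frame' on track 0
    pixBlock*pix = decoder.getFrame(); // decode one frame & convert
    if(pix && pix->newimage) {
      // pix->image.data now points at the converted pixels
    }
  }
  decoder.close();
}
#endif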