ffmpeg convert to webm: error "Too many invisible frames"
0 votes
24 January 2019

I need to convert any input format (e.g. mp4, avi, etc.) to .webm using my own ioContext. I build ffmpeg with vpx, ogg, vorbis and opus and put together a simple project. But when I write any frame I get the error "Too many invisible frames. Failed to send packet to filter vp9_superframe for stream 0".

I have already tried converting from webm to webm, copying the codec parameters with avcodec_parameters_copy, and that works.
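
Roughly, the working webm-to-webm copy looked like this (a minimal sketch: the file names are placeholders, the custom AVIOContext is left out, and this is not meant as my exact code, just the avcodec_parameters_copy approach):

extern "C" {
#include <libavformat/avformat.h>
}

// Minimal stream-copy remux: open input, mirror its streams into the output
// container with avcodec_parameters_copy, then pass packets through.
static int remux_copy(const char *in_name, const char *out_name)
{
    AVFormatContext *in_ctx = NULL, *out_ctx = NULL;
    AVPacket pkt;
    int ret;

    if ((ret = avformat_open_input(&in_ctx, in_name, NULL, NULL)) < 0)
        return ret;
    if ((ret = avformat_find_stream_info(in_ctx, NULL)) < 0)
        return ret;

    avformat_alloc_output_context2(&out_ctx, NULL, NULL, out_name);
    if (!out_ctx)
        return AVERROR_UNKNOWN;

    for (unsigned int i = 0; i < in_ctx->nb_streams; i++) {
        AVStream *out_stream = avformat_new_stream(out_ctx, NULL);
        if (!out_stream)
            return AVERROR_UNKNOWN;
        // copy the whole codecpar block instead of assigning fields one by one
        if ((ret = avcodec_parameters_copy(out_stream->codecpar,
                                           in_ctx->streams[i]->codecpar)) < 0)
            return ret;
        out_stream->codecpar->codec_tag = 0;
    }

    if ((ret = avio_open(&out_ctx->pb, out_name, AVIO_FLAG_WRITE)) < 0)
        return ret;
    if ((ret = avformat_write_header(out_ctx, NULL)) < 0)
        return ret;

    while (av_read_frame(in_ctx, &pkt) >= 0) {
        AVStream *in_s  = in_ctx->streams[pkt.stream_index];
        AVStream *out_s = out_ctx->streams[pkt.stream_index];
        // rescale timestamps from the input to the output time base
        av_packet_rescale_ts(&pkt, in_s->time_base, out_s->time_base);
        pkt.pos = -1;
        if ((ret = av_interleaved_write_frame(out_ctx, &pkt)) < 0)
            break;
        av_packet_unref(&pkt);
    }

    av_write_trailer(out_ctx);
    avformat_close_input(&in_ctx);
    avio_closep(&out_ctx->pb);
    avformat_free_context(out_ctx);
    return ret < 0 ? ret : 0;
}

The program below does essentially the same thing, except that the output goes through a custom write-only AVIOContext and the codec parameters are filled in field by field instead of being copied.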

#include <QCoreApplication>
#include <QFileInfo>
#include <QFile>
#include <QByteArray>
#include <QDebug>
#include <iostream>
#include <fstream>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/timestamp.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
}

using namespace std;

struct BufferData {
    QByteArray data;
    uint fullsize;

    BufferData() {
        fullsize = 0;
    }
};


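// AVIOContext write callback: the muxer passes chunks of the muxed output here,
// and we append them to the QByteArray inside BufferData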
static int write_packet_to_buffer(void *opaque, uint8_t *buf, int buf_size) {
    BufferData *bufferData = static_cast<BufferData*>(opaque);
    bufferData->fullsize += buf_size;
    bufferData->data.append((const char*)buf, buf_size);
    return buf_size;
}


static bool writeBuffer(const QString &filename, BufferData *bufferData) {
    QFile file(filename);
    if( !file.open(QIODevice::WriteOnly) )  return false;
    file.write(bufferData->data);
    qDebug()<<"FILE SIZE = " << file.size();
    file.close();
    return true;
}

int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    int ret;
    int stream_index = 0;
    int *stream_mapping = NULL;
    int stream_mapping_size = 0;

    const char *in_filename  = "../assets/sample.mp4";
    const char *out_filename = "../assets/sample_new.webm";


    //------------------------  Input file  ----------------------------
    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        return 1;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        return 1;
    }
    av_dump_format(ifmt_ctx, 0, in_filename, 0);
    //-----------------------------------------------------------------


    //---------------------- BUFFER -------------------------
   AVIOContext *avio_ctx = NULL;
   uint8_t *avio_ctx_buffer = NULL;
   size_t avio_ctx_buffer_size = 4096*1024;
   const size_t bd_buf_size = 1024*1024;
   /* fill opaque structure used by the AVIOContext write callback */
   avio_ctx_buffer = (uint8_t*)av_malloc(avio_ctx_buffer_size);
   if (!avio_ctx_buffer) return AVERROR(ENOMEM);

   BufferData bufferData;
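   // write-only custom I/O: write_flag = 1, opaque is &bufferData,
   // no read callback and no seek callback (the muxer cannot seek back in the output)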
   avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
                                 1, &bufferData, NULL,
                                 &write_packet_to_buffer, NULL);


   if (!avio_ctx) return AVERROR(ENOMEM);
   //------------------------------------------------------


    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        return 1;
    }

    //------------------------  Stream list  ----------------------------
    stream_mapping_size = ifmt_ctx->nb_streams;
    stream_mapping = (int*)av_mallocz_array(stream_mapping_size, sizeof(*stream_mapping));
    if (!stream_mapping) {
        ret = AVERROR(ENOMEM);
        return 1;
    }
    //-------------------------------------------------------------------



    //------------------------  Output file  ----------------------------
    AVCodec *encoder = NULL;
    AVCodecContext *input_ctx = NULL;
    AVCodecContext *enc_ctx = NULL;
    for (unsigned int i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *out_stream;
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVCodecParameters *in_codecpar = in_stream->codecpar;

        if (in_codecpar->codec_type != AVMEDIA_TYPE_AUDIO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
            in_codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
            stream_mapping[i] = -1;
            continue;
        }

        enc_ctx = avcodec_alloc_context3(encoder);
        if (!enc_ctx) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
            return AVERROR(ENOMEM);
        }
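        // NOTE: enc_ctx is only allocated here; it is never configured or used below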

        stream_mapping[i] = stream_index++;

        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            return 1;
        }

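        // copy the codec parameters field by field (instead of avcodec_parameters_copy);
        // codec_id is deliberately not copied, it is set from the muxer defaults below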
        out_stream->codecpar->width = in_codecpar->width;
        out_stream->codecpar->height = in_codecpar->height;
        out_stream->codecpar->level = in_codecpar->level;
        out_stream->codecpar->format = in_codecpar->format;
        out_stream->codecpar->profile = in_codecpar->profile;
        out_stream->codecpar->bit_rate = in_codecpar->bit_rate;
        out_stream->codecpar->channels = in_codecpar->channels;
        out_stream->codecpar->codec_tag = 0;
        out_stream->codecpar->color_trc = in_codecpar->color_trc;
        out_stream->codecpar->codec_type = in_codecpar->codec_type;
        out_stream->codecpar->frame_size = in_codecpar->frame_size;
        out_stream->codecpar->block_align = in_codecpar->block_align;
        out_stream->codecpar->color_range = in_codecpar->color_range;
        out_stream->codecpar->color_space = in_codecpar->color_space;
        out_stream->codecpar->field_order = in_codecpar->field_order;
        out_stream->codecpar->sample_rate = in_codecpar->sample_rate;
        out_stream->codecpar->video_delay = in_codecpar->video_delay;
        out_stream->codecpar->seek_preroll = in_codecpar->seek_preroll;
        out_stream->codecpar->channel_layout = in_codecpar->channel_layout;
        out_stream->codecpar->chroma_location = in_codecpar->chroma_location;
        out_stream->codecpar->color_primaries = in_codecpar->color_primaries;
        out_stream->codecpar->initial_padding = in_codecpar->initial_padding;
        out_stream->codecpar->trailing_padding = in_codecpar->trailing_padding;
        out_stream->codecpar->bits_per_raw_sample = in_codecpar->bits_per_raw_sample;
        out_stream->codecpar->sample_aspect_ratio.num = in_codecpar->sample_aspect_ratio.num;
        out_stream->codecpar->sample_aspect_ratio.den = in_codecpar->sample_aspect_ratio.den;
        out_stream->codecpar->bits_per_coded_sample = in_codecpar->bits_per_coded_sample;


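        // take the codec id from the muxer defaults (for webm: VP9 for video,
        // Opus or Vorbis for audio, depending on the build)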
        if (in_codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            out_stream->codecpar->codec_id = ofmt_ctx->oformat->video_codec;
        }
        else if (in_codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            out_stream->codecpar->codec_id = ofmt_ctx->oformat->audio_codec;
        }
    }
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
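    // hand the custom AVIOContext to the muxer instead of calling avio_open()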
    ofmt_ctx->pb = avio_ctx;

    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        return 1;
    }
    //------------------------------------------------------------------------------


    while (1) {
        AVStream *in_stream, *out_stream;

        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;

        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        if (pkt.stream_index >= stream_mapping_size ||
            stream_mapping[pkt.stream_index] < 0) {
            av_packet_unref(&pkt);
            continue;
        }

        pkt.stream_index = stream_mapping[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];

        /* copy the packet, rescaling timestamps from the input to the output time base */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AVRounding(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;

        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_packet_unref(&pkt);
    }
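    // flush buffered packets and write the container trailer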
    av_write_trailer(ofmt_ctx);
    avformat_close_input(&ifmt_ctx);

    /* close output */
    writeBuffer(out_filename, &bufferData);
    avformat_free_context(ofmt_ctx);
    /* free the custom AVIOContext (its internal buffer may have been reallocated) */
    av_freep(&avio_ctx->buffer);
    avio_context_free(&avio_ctx);
    av_freep(&stream_mapping);
    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %d\n",ret);
        return 1;
    }
    return a.exec();
}
...