Encoding video from C++ with libavcodec and VAAPI

I am trying to encode a video in H.264 with libavcodec. It works when I use the software encoder "libx264", however it does not when I try to use the hardware encoder of my Intel cpu with VAAPI. Hardware encoding with ffmpeg via VAAPI works from the command line (using commands from here).

Apparently, there is no example or tutorial how to encode with VAAPI and libav*. I read through those of ffmpeg's examples that cover a related use case (hardware decoding, software encoding, muxing) and tried to adapt them accordingly.

When I setup the VAAPI encoder, avcodec_open2() returns AVERROR(EINVAL) (-22) and it prints the following error message to the console:

Mismatching AVCodecContext.pix_fmt and AVHWFramesContext.format

You can find it at the end of Encoder::setupEncoder() in my code. What am I missing?

Below is my code, which is split across three files:

encoder.h

#ifndef ENCODER_H
#define ENCODER_H
#include <cassert>

extern "C" {
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <libavutil/hwcontext.h>
}

// Minimal H.264 encoding pipeline around libavcodec/libavformat.
// Encodes fixed-size test frames either in software (libx264) or in
// hardware via VAAPI (h264_vaapi), and muxes them into a Matroska file.
class Encoder
{
public:
    // hwAccel selects the VAAPI hardware path (true) or libx264 (false).
    explicit Encoder(const bool hwAccel);
    // Submits one frame for encoding. Hardware path expects NV12 input,
    // software path expects YUV420P (see main.cpp).
    void addFrame(AVFrame* frame);
    // Drains the encoder and writes the container trailer.
    void flush();

    // Fixed output parameters shared with the frame-creation code.
    static constexpr int s_width = 640;
    static constexpr int s_height = 480;
    static constexpr int s_fps = 25;
private:
    void setup();
    void setupEncoder();
    void encodeFrame(AVFrame* frame);

    // members
    int m_frameId = 1;                          // next pts, in 1/s_fps units
    const bool m_hardwareAcceleration = false;  // set once in the constructor

    AVCodecContext* m_encoder = nullptr;
    AVFormatContext* m_muxer = nullptr;
    AVStream* m_avStream = nullptr;
    AVBufferRef* m_device = nullptr;  // VAAPI device context (hardware path only)

    AVFrame* m_hwFrame = nullptr;     // reusable VAAPI surface for uploads
};

#endif // ENCODER_H

encoder.cpp

#include "encoder.h"

extern "C" {

// Pixel-format negotiation callback: pick the VAAPI hardware surface format
// out of the list the codec offers, or AV_PIX_FMT_NONE on failure.
static enum AVPixelFormat get_vaapi_format(AVCodecContext*, const enum AVPixelFormat *pix_fmts)
{
    for (const enum AVPixelFormat* fmt = pix_fmts; *fmt != AV_PIX_FMT_NONE; ++fmt) {
        if (*fmt == AV_PIX_FMT_VAAPI)
            return AV_PIX_FMT_VAAPI;
    }
    fprintf(stderr, "Failed to get HW surface format.\n");
    return AV_PIX_FMT_NONE;
}

}

Encoder::Encoder(const bool hwAccel)
  : m_hardwareAcceleration(hwAccel)
{
    setup();
}
// Submit one frame to the encoder. On the hardware path the CPU-side NV12
// frame is first uploaded into the pre-allocated VAAPI surface.
void Encoder::addFrame(AVFrame* frame)
{
    AVFrame* frameToEncode = frame;
    if(m_hardwareAcceleration) {
        assert(frame->format == AV_PIX_FMT_NV12);
        // Fix: the return value of av_hwframe_transfer_data() was silently
        // ignored; a failed upload would feed stale surface data to the
        // encoder. The call stays outside assert() so it survives NDEBUG.
        const int uploadResult = av_hwframe_transfer_data(m_hwFrame, frame, 0);
        assert(uploadResult == 0);
        assert(m_hwFrame->format == AV_PIX_FMT_VAAPI);
        frameToEncode = m_hwFrame;
    }

    // Monotonically increasing pts in encoder time_base units (1/s_fps).
    frameToEncode->pts = m_frameId++;
    encodeFrame(frameToEncode);
}
// Drain the encoder (null frame enters flush mode) and finalize the file.
void Encoder::flush()
{
    encodeFrame(nullptr);
    // Fix: av_write_trailer()'s return value was ignored, unlike every other
    // libav call in this file. Kept outside assert() so it runs under NDEBUG.
    const int trailerResult = av_write_trailer(m_muxer);
    assert(trailerResult == 0);
}

// Create the Matroska muxer, configure the encoder, create the output stream
// and write the container header. Output file is "hardware.mkv" or
// "software.mkv" depending on the selected backend.
void Encoder::setup()
{
    assert(avformat_alloc_output_context2(&m_muxer, nullptr, "matroska", nullptr) == 0);
    assert(m_muxer != nullptr);

    setupEncoder();

    m_avStream = avformat_new_stream(m_muxer, nullptr);
    assert(m_avStream != nullptr);
    m_avStream->id = m_muxer->nb_streams-1;
    m_avStream->time_base = m_encoder->time_base;

    // Some formats want stream headers to be separate.
    // NOTE(review): this flag is set AFTER setupEncoder() has already called
    // avcodec_open2(); to take effect it must be set on the codec context
    // BEFORE the codec is opened -- verify against formats that need it.
    if(m_muxer->oformat->flags & AVFMT_GLOBALHEADER)
        m_encoder->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    assert(avcodec_parameters_from_context(m_avStream->codecpar, m_encoder) == 0);
    // NOTE(review): libav calls wrapped in assert() are compiled out under
    // NDEBUG, taking their side effects with them -- acceptable for a demo,
    // broken in a release build.
    assert(avio_open(&m_muxer->pb, m_hardwareAcceleration? "hardware.mkv": "software.mkv", AVIO_FLAG_WRITE) == 0);
    assert(avformat_write_header(m_muxer, nullptr) == 0);
}
void Encoder::setupEncoder()
{
    const char* encoderName = m_hardwareAcceleration? "h264_vaapi": "libx264";
    AVCodec* videoCodec = avcodec_find_encoder_by_name(encoderName);
    m_encoder = avcodec_alloc_context3(videoCodec);
    m_encoder->bit_rate = s_width * s_height * s_fps * 2;
    m_encoder->width = s_width;
    m_encoder->height = s_height;
    m_encoder->time_base = (AVRational){1, s_fps};
    m_encoder->framerate = (AVRational){s_fps, 1};

    m_encoder->gop_size = s_fps;  // have at least 1 I-frame per second
    m_encoder->max_b_frames = 1;
    m_encoder->pix_fmt = AV_PIX_FMT_YUV420P;

    if(m_hardwareAcceleration) {
        m_encoder->pix_fmt = AV_PIX_FMT_VAAPI;
        m_encoder->get_format = get_vaapi_format;

        assert(av_hwdevice_ctx_create(&m_device, AV_HWDEVICE_TYPE_VAAPI, "/dev/dri/renderD128", nullptr, 0) == 0);

        AVHWDeviceContext* deviceCtx = (AVHWDeviceContext*) m_device->data;
        assert(deviceCtx->type == AV_HWDEVICE_TYPE_VAAPI);

        m_encoder->hw_device_ctx = av_hwframe_ctx_alloc(m_device);
        m_encoder->hw_frames_ctx = av_buffer_ref(m_device);
        m_hwFrame = av_frame_alloc();
        av_hwframe_get_buffer(m_encoder->hw_device_ctx, m_hwFrame, 0);
    }

    assert(avcodec_open2(m_encoder, videoCodec, nullptr) == 0);  // <-- returns -22 (EINVAL) for hardware encoder

    m_muxer->video_codec_id = videoCodec->id;
    m_muxer->video_codec = videoCodec;
}
// Feed one frame (or nullptr to enter flush mode) into the encoder, then
// write every packet it produces into the container.
void Encoder::encodeFrame(AVFrame* frame)
{
    assert(avcodec_send_frame(m_encoder, frame) == 0);

    AVPacket packet;
    av_init_packet(&packet);
    while(true) {
        const int status = avcodec_receive_packet(m_encoder, &packet);
        if(status == AVERROR(EAGAIN) || status == AVERROR_EOF)
            return;  // nothing to write
        assert(status >= 0);

        // Rescale pts/dts from encoder time base to the stream's time base
        // before handing the packet to the muxer.
        av_packet_rescale_ts(&packet, m_encoder->time_base, m_avStream->time_base);
        packet.stream_index = m_avStream->index;
        av_interleaved_write_frame(m_muxer, &packet);
        av_packet_unref(&packet);
    }
}

main.cpp

#include "encoder.h"

// Allocate a test frame of the requested pixel format and fill it with
// black (all samples zero). Returns nullptr for unsupported formats.
AVFrame* createFrame(const int format)
{
    AVFrame* frame = av_frame_alloc();
    frame->format = format;
    frame->width  = Encoder::s_width;
    frame->height = Encoder::s_height;
    assert(av_frame_get_buffer(frame, 0) == 0);
    assert(av_frame_make_writable(frame) == 0);

    // Luma plane (Y): one byte per pixel in both supported formats.
    for(int y=0; y<frame->height; y++) {
        for(int x=0; x<frame->width; x++) {
            frame->data[0][y * frame->linesize[0] + x] = 0;
        }
    }

    // Chroma is subsampled 2x2 in both YUV420P and NV12.
    const int widthCbCr  = frame->width / 2;
    const int heightCbCr = frame->height / 2;

    if(format == AV_PIX_FMT_YUV420P) {
        // Planar chroma: separate Cb and Cr planes.
        for(int y=0; y<heightCbCr; y++) {
            for(int x=0; x<widthCbCr; x++) {
                frame->data[1][y * frame->linesize[1] + x] = 0;  // Cb
                frame->data[2][y * frame->linesize[2] + x] = 0;  // Cr
            }
        }
        return frame;
    }
    else if(format == AV_PIX_FMT_NV12) {
        // Semi-planar chroma: data[1] holds interleaved Cb/Cr pairs, i.e.
        // 2*widthCbCr bytes per row. Fix: the original loop wrote only
        // widthCbCr bytes per row (leaving half of every chroma row
        // uninitialized) and indexed with linesize[0] instead of linesize[1].
        for(int y=0; y<heightCbCr; y++) {
            for(int x=0; x<widthCbCr; x++) {
                frame->data[1][y * frame->linesize[1] + 2*x    ] = 0;  // Cb
                frame->data[1][y * frame->linesize[1] + 2*x + 1] = 0;  // Cr
            }
        }
        return frame;
    }

    return nullptr;
}

int main()
{
    av_register_all();

    // One all-black test frame per pixel format, reused for every addFrame().
    AVFrame* yuv420pFrame = createFrame(AV_PIX_FMT_YUV420P);
    AVFrame* nv12Frame = createFrame(AV_PIX_FMT_NV12);

    // Software path (libx264): works as expected.
    Encoder softwareEncoder(false);
    for(int i = 0; i < 100; ++i) {
        softwareEncoder.addFrame(yuv420pFrame);
    }
    softwareEncoder.flush();

    // Hardware path (VAAPI): the one under investigation.
    Encoder hardwareEncoder(true);
    for(int i = 0; i < 100; ++i) {
        hardwareEncoder.addFrame(nv12Frame);
    }
    hardwareEncoder.flush();

    return 0;
}

Note that I intentionally left out the various free() functions and destructors to keep the code short.