I'm completely lost. I'm trying to capture 30 screenshots and turn them into a video with FFmpeg under Windows 10, and it keeps telling me [swscaler @ 073890a0] bad src image pointers; as a result the video is entirely green. If I change the input format to dshow with video=screen-capture-recorder, the video comes out as mostly garbage instead. Here is the short code I'm using. I'm completely stuck and don't even know in which direction to look.
MainWindow.h
#ifndef MAINWINDOW_H
#define MAINWINDOW_H
#include <QMainWindow>
#include <QFuture>
#include <QFutureWatcher>
#include <QMutex>
#include <QMutexLocker>
extern "C" {
#include "libavcodec/avcodec.h"
#include "libavcodec/avfft.h"
#include "libavdevice/avdevice.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavutil/opt.h"
#include "libavutil/common.h"
#include "libavutil/channel_layout.h"
#include "libavutil/imgutils.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"
#include "libavutil/time.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/file.h"
#include "libswscale/swscale.h"
}
class MainWindow : public QMainWindow
{
Q_OBJECT
public:
MainWindow(QWidget *parent = 0);
~MainWindow();
private:
AVFormatContext *inputFormatContext = nullptr;
AVFormatContext *outFormatContext = nullptr;
AVStream* videoStream = nullptr;
AVDictionary* options = nullptr;
AVCodec* outCodec = nullptr;
AVCodec* inputCodec = nullptr;
AVCodecContext* inputCodecContext = nullptr;
AVCodecContext* outCodecContext = nullptr;
SwsContext* swsContext = nullptr;
private:
void init();
void initOutFile();
void collectFrame();
};
#endif // MAINWINDOW_H
MainWindow.cpp
#include "MainWindow.h"
#include <QGuiApplication>
#include <QLabel>
#include <QScreen>
#include <QTimer>
#include <QLayout>
#include <QImage>
#include <QtConcurrent/QtConcurrent>
#include <QThreadPool>
#include "ScreenCapture.h"
MainWindow::MainWindow(QWidget *parent) : QMainWindow(parent)
{
resize(800, 600);
auto label = new QLabel();
label->setAlignment(Qt::AlignHCenter | Qt::AlignVCenter);
auto layout = new QHBoxLayout();
layout->addWidget(label);
auto widget = new QWidget();
widget->setLayout(layout);
setCentralWidget(widget);
init();
initOutFile();
collectFrame();
}
MainWindow::~MainWindow()
{
avformat_close_input(&inputFormatContext);
avformat_free_context(inputFormatContext);
QThreadPool::globalInstance()->waitForDone();
}
void MainWindow::init()
{
av_register_all();
avcodec_register_all();
avdevice_register_all();
avformat_network_init();
auto screen = QGuiApplication::screens()[0];
QRect geometry = screen->geometry();
inputFormatContext = avformat_alloc_context();
options = NULL;
av_dict_set(&options, "framerate", "30", NULL);
av_dict_set(&options, "offset_x", QString::number(geometry.x()).toLatin1().data(), NULL);
av_dict_set(&options, "offset_y", QString::number(geometry.y()).toLatin1().data(), NULL);
av_dict_set(&options, "video_size", QString(QString::number(geometry.width()) + "x" + QString::number(geometry.height())).toLatin1().data(), NULL);
av_dict_set(&options, "show_region", "1", NULL);
AVInputFormat* inputFormat = av_find_input_format("gdigrab");
avformat_open_input(&inputFormatContext, "desktop", inputFormat, &options);
int videoStreamIndex = av_find_best_stream(inputFormatContext, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
inputCodecContext = inputFormatContext->streams[videoStreamIndex]->codec;
inputCodecContext->width = geometry.width();
inputCodecContext->height = geometry.height();
inputCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
inputCodec = avcodec_find_decoder(inputCodecContext->codec_id);
avcodec_open2(inputCodecContext, inputCodec, NULL);
}
void MainWindow::initOutFile()
{
const char* filename = "C:/Temp/output.mp4";
avformat_alloc_output_context2(&outFormatContext, NULL, NULL, filename);
outCodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
videoStream = avformat_new_stream(outFormatContext, outCodec);
videoStream->time_base = {1, 30};
outCodecContext = videoStream->codec;
outCodecContext->codec_id = AV_CODEC_ID_MPEG4;
outCodecContext->codec_type = AVMEDIA_TYPE_VIDEO;
outCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
outCodecContext->bit_rate = 400000;
outCodecContext->width = inputCodecContext->width;
outCodecContext->height = inputCodecContext->height;
outCodecContext->gop_size = 3;
outCodecContext->max_b_frames = 2;
outCodecContext->time_base = videoStream->time_base;
if (outFormatContext->oformat->flags & AVFMT_GLOBALHEADER)
outCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
avcodec_open2(outCodecContext, outCodec, NULL);
if (!(outFormatContext->flags & AVFMT_NOFILE))
avio_open2(&outFormatContext->pb, filename, AVIO_FLAG_WRITE, NULL, NULL);
swsContext = sws_getContext(inputCodecContext->width,
inputCodecContext->height,
inputCodecContext->pix_fmt,
outCodecContext->width,
outCodecContext->height,
outCodecContext->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
avformat_write_header(outFormatContext, &options);
}
void MainWindow::collectFrame()
{
AVFrame* frame = av_frame_alloc();
frame->data[0] = NULL;
frame->width = inputCodecContext->width;
frame->height = inputCodecContext->height;
frame->format = inputCodecContext->pix_fmt;
av_image_alloc(frame->data, frame->linesize, inputCodecContext->width, inputCodecContext->height, (AVPixelFormat)frame->format, 32);
AVFrame* outFrame = av_frame_alloc();
outFrame->data[0] = NULL;
outFrame->width = outCodecContext->width;
outFrame->height = outCodecContext->height;
outFrame->format = outCodecContext->pix_fmt;
av_image_alloc(outFrame->data, outFrame->linesize, outCodecContext->width, outCodecContext->height, (AVPixelFormat)outFrame->format, 32);
int bufferSize = av_image_get_buffer_size(outCodecContext->pix_fmt,
outCodecContext->width,
outCodecContext->height,
24);
uint8_t* outBuffer = (uint8_t*)av_malloc(bufferSize);
avpicture_fill((AVPicture*)outFrame, outBuffer,
AV_PIX_FMT_YUV420P,
outCodecContext->width, outCodecContext->height);
int frameCount = 30;
int count = 0;
AVPacket* packet = (AVPacket*)av_malloc(sizeof(AVPacket));
av_init_packet(packet);
while(av_read_frame(inputFormatContext, packet) >= 0)
{
if(packet->stream_index == videoStream->index)
{
int frameFinished = 0;
avcodec_decode_video2(inputCodecContext, frame, &frameFinished, packet);
if(frameFinished)
{
if(++count > frameCount)
{
qDebug() << "FINISHED!";
break;
}
sws_scale(swsContext, frame->data, frame->linesize, 0, inputCodecContext->height, outFrame->data, outFrame->linesize);
AVPacket outPacket;
av_init_packet(&outPacket);
outPacket.data = NULL;
outPacket.size = 0;
int got_picture = 0;
avcodec_encode_video2(outCodecContext, &outPacket, outFrame, &got_picture);
if(got_picture)
{
if(outPacket.pts != AV_NOPTS_VALUE) outPacket.pts = av_rescale_q(outPacket.pts, videoStream->codec->time_base, videoStream->time_base);
if(outPacket.dts != AV_NOPTS_VALUE) outPacket.dts = av_rescale_q(outPacket.dts, videoStream->codec->time_base, videoStream->time_base);
av_write_frame(outFormatContext , &outPacket);
}
av_packet_unref(&outPacket);
}
}
}
av_write_trailer(outFormatContext);
av_free(outBuffer);
}
I think the problem is that you use some unnecessary code and some deprecated functions. These lines are unnecessary:
int bufferSize = av_image_get_buffer_size(outCodecContext->pix_fmt,
outCodecContext->width,
outCodecContext->height,
24);
uint8_t* outBuffer = (uint8_t*)av_malloc(bufferSize);
avpicture_fill((AVPicture*)outFrame, outBuffer,
AV_PIX_FMT_YUV420P,
outCodecContext->width, outCodecContext->height);
Doing this corrupts the frame that was already set up correctly and also causes a memory leak, because you have already allocated the correct buffer for the planar yuv420p frame with av_image_alloc (although you didn't check its return code for failure). You may keep the buffer-size calculation if you need it for some other purpose. And don't forget to remove av_free(outBuffer) as well.
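As a minimal sketch (reusing outFrame and outCodecContext from your code), the allocation with a failure check could look like this:
// Allocate the yuv420p output buffer and bail out on failure.
int ret = av_image_alloc(outFrame->data, outFrame->linesize,
                         outCodecContext->width, outCodecContext->height,
                         (AVPixelFormat)outFrame->format, 32);
if (ret < 0) {
    qDebug() << "av_image_alloc failed:" << ret;
    return; // handle the error in whatever way fits your application
}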
These lines should be replaced:
AVPacket* packet = (AVPacket*)av_malloc(sizeof(AVPacket));
av_init_packet(packet);
with this:
AVPacket *packet = av_packet_alloc(); // also inits to defaults
if (packet == NULL) {
// handle error
}
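When you are finished with the packet, free it with the matching function instead of av_free:
av_packet_free(&packet); // unreferences the data and frees the packet, setting the pointer to NULL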
A couple of other things: avcodec_decode_video2 and avcodec_encode_video2 are also deprecated, but they should still work. Lastly, in my experience av_interleaved_write_frame works better than av_write_frame.
Examples of the new decode and encode API can be found here: https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples
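For illustration only, a rough sketch of encoding one frame with the newer send/receive API (reusing outCodecContext, outFrame, videoStream and outFormatContext from your code, with error handling shortened) might look like this:
// Feed one frame to the encoder; pass NULL instead of outFrame to flush at the end.
if (avcodec_send_frame(outCodecContext, outFrame) < 0) {
    // handle error
}
AVPacket* outPacket = av_packet_alloc();
for (;;) {
    int ret = avcodec_receive_packet(outCodecContext, outPacket);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        break; // encoder needs more input, or is fully flushed
    if (ret < 0)
        break; // real error, handle it
    // Rescale timestamps from the codec time base to the stream time base.
    av_packet_rescale_ts(outPacket, outCodecContext->time_base, videoStream->time_base);
    outPacket->stream_index = videoStream->index;
    // av_interleaved_write_frame() takes ownership of the packet reference.
    av_interleaved_write_frame(outFormatContext, outPacket);
}
av_packet_free(&outPacket);
This mirrors the encode and mux examples in the link above, just adapted to the variable names in your code.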
Hope that helps.