Screen Recording with VS2013 / MFC + FFmpeg

This is a screen-recording feature implemented with FFmpeg. It records both the screen and the audio; the resulting video is encoded as MPEG4 and the audio as AAC. The main program simply creates three threads (ScreenCapThreadProc, AudioCapThreadProc, OutPutThreadProc) that respectively capture desktop frames, capture audio, and mux the captured frames and audio into a single video file.
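The three thread functions below share a handful of globals (bCap, flagThread, the two critical sections, the FIFOs and the format contexts). Starting them is ordinary Win32 thread code; the following is a minimal sketch, assuming the input/output contexts have already been opened (the handle array and the stop sequence are illustrative, not the project's exact code):

// Minimal sketch of starting the three worker threads (Win32).
// bCap, flagThread, VideoSection and AudioSection are the globals used by the
// thread functions below; the input/output contexts are assumed to be open already.
InitializeCriticalSection(&VideoSection);
InitializeCriticalSection(&AudioSection);

bCap = true;		// capture flag polled by all three loops
flagThread = false;	// set to true later to request a stop

HANDLE hThread[3];
hThread[0] = CreateThread(NULL, 0, ScreenCapThreadProc, NULL, 0, NULL);
hThread[1] = CreateThread(NULL, 0, AudioCapThreadProc, NULL, 0, NULL);
hThread[2] = CreateThread(NULL, 0, OutPutThreadProc, NULL, 0, NULL);

// ... later, to stop recording:
flagThread = true;	// OutPutThreadProc clears bCap and drains the FIFOs
WaitForMultipleObjects(3, hThread, TRUE, INFINITE);

To stop recording, the UI only needs to set flagThread; OutPutThreadProc then clears bCap, waits briefly for the capture threads, drains the FIFOs and finalizes the file.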

The ScreenCapThreadProc thread function:

DWORD WINAPI ScreenCapThreadProc(LPVOID lpParam)
{
	AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));
	av_init_packet(packet);
	int got_picture;
	AVFrame *pFrame = av_frame_alloc();

	// Frame/buffer that receive the converted (output pixel format) image before it is queued.
	AVFrame *picture = av_frame_alloc();
	int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
		pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);
	picture_buf = new uint8_t[size];

	avpicture_fill((AVPicture *)picture, picture_buf,
		pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
		pFormatCtx_Out->streams[VideoIndex]->codec->width,
		pFormatCtx_Out->streams[VideoIndex]->codec->height);

	int height = pFormatCtx_Out->streams[VideoIndex]->codec->height;
	int width = pFormatCtx_Out->streams[VideoIndex]->codec->width;
	int y_size = height * width;
	while (bCap)
	{
		// Grab one raw frame from the gdigrab desktop input.
		if (av_read_frame(pFormatCtx_Video, packet) < 0)
		{
			continue;
		}
		if (packet->stream_index == 0)
		{
			if (avcodec_decode_video2(pCodecCtx_Video, pFrame, &got_picture, packet) < 0)
			{
				printf("Decode Error.\n");
				av_free_packet(packet);
				continue;
			}
			if (got_picture)
			{
				// Convert the captured frame to the output pixel format (YUV420P).
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0,
					pFormatCtx_Out->streams[VideoIndex]->codec->height, picture->data, picture->linesize);

				// Queue the Y, U and V planes into the video FIFO if there is room for a whole frame.
				if (av_fifo_space(fifo_video) >= size)
				{
					EnterCriticalSection(&VideoSection);
					av_fifo_generic_write(fifo_video, picture->data[0], y_size, NULL);
					av_fifo_generic_write(fifo_video, picture->data[1], y_size / 4, NULL);
					av_fifo_generic_write(fifo_video, picture->data[2], y_size / 4, NULL);
					LeaveCriticalSection(&VideoSection);
				}
			}
		}
		av_free_packet(packet);	// release the packet data read in this iteration
	}
	av_frame_free(&pFrame);
	av_frame_free(&picture);
	av_free(packet);
	delete[] picture_buf;
	return 0;
}
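ScreenCapThreadProc relies on several globals that must be prepared beforehand: pFormatCtx_Video (the gdigrab desktop input), pCodecCtx_Video (its decoder), img_convert_ctx (the swscale converter to the output pixel format), and fifo_video / frame_size (the raw-frame FIFO and its per-frame size). A minimal sketch of that setup, with an assumed helper name OpenVideoCapture and abbreviated error handling:

// Sketch only: prepare the globals read by ScreenCapThreadProc.
// gdigrab lives in libavdevice, so av_register_all() / avdevice_register_all()
// must have been called first.
int OpenVideoCapture()
{
	AVInputFormat *ifmt = av_find_input_format("gdigrab");

	AVDictionary *options = NULL;
	av_dict_set(&options, "framerate", "15", 0);	// must match the fps used for pts in OutPutThreadProc

	if (avformat_open_input(&pFormatCtx_Video, "desktop", ifmt, &options) != 0)
		return -1;
	if (avformat_find_stream_info(pFormatCtx_Video, NULL) < 0)
		return -1;

	// gdigrab delivers raw BGRA frames; open the matching "decoder".
	pCodecCtx_Video = pFormatCtx_Video->streams[0]->codec;
	AVCodec *codec = avcodec_find_decoder(pCodecCtx_Video->codec_id);
	if (codec == NULL || avcodec_open2(pCodecCtx_Video, codec, NULL) < 0)
		return -1;

	// Converter from the captured pixel format to the encoder's YUV420P.
	img_convert_ctx = sws_getContext(pCodecCtx_Video->width, pCodecCtx_Video->height, pCodecCtx_Video->pix_fmt,
		pCodecCtx_Video->width, pCodecCtx_Video->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	// FIFO with room for roughly 30 raw YUV420P frames.
	frame_size = avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height);
	fifo_video = av_fifo_alloc(30 * frame_size);

	return 0;
}

The 15 fps frame rate set here has to agree with the divisor used in the pts calculation inside OutPutThreadProc, otherwise the recorded video plays too fast or too slow.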

The AudioCapThreadProc thread function:

DWORD WINAPI AudioCapThreadProc(LPVOID lpParam)
{
	AVPacket pkt;
	AVFrame *frame = av_frame_alloc();
	int gotframe;
	while (bCap)
	{
		// Grab one packet from the DirectShow audio input.
		if (av_read_frame(pFormatCtx_Audio, &pkt) < 0)
		{
			continue;
		}

		if (avcodec_decode_audio4(pFormatCtx_Audio->streams[0]->codec, frame, &gotframe, &pkt) < 0)
		{
			av_free_packet(&pkt);
			av_frame_free(&frame);
			printf("can not decode a frame\n");
			break;
		}
		av_free_packet(&pkt);

		if (!gotframe)
		{
			continue;	// no frame produced yet, read the next packet
		}

		// Lazily create the audio FIFO, sized for roughly 30 frames of samples.
		if (NULL == fifo_audio)
		{
			fifo_audio = av_audio_fifo_alloc(pFormatCtx_Audio->streams[0]->codec->sample_fmt,
				pFormatCtx_Audio->streams[0]->codec->channels, 30 * frame->nb_samples);
		}

		// Queue the decoded samples if the FIFO has room.
		if (av_audio_fifo_space(fifo_audio) >= frame->nb_samples)
		{
			EnterCriticalSection(&AudioSection);
			av_audio_fifo_write(fifo_audio, (void **)frame->data, frame->nb_samples);
			LeaveCriticalSection(&AudioSection);
		}
	}
	av_frame_free(&frame);
	return 0;
}
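AudioCapThreadProc reads from pFormatCtx_Audio, a DirectShow audio capture device opened through FFmpeg's dshow input. A minimal sketch with an assumed helper name OpenAudioCapture; the device name after "audio=" is only an example and must be replaced with the exact name reported by the ffmpeg command shown at the end of this post:

// Sketch only: open the DirectShow audio device read by AudioCapThreadProc.
int OpenAudioCapture()
{
	AVInputFormat *ifmt = av_find_input_format("dshow");

	// The device name must be the one listed by "ffmpeg -list_devices true -f dshow -i dummy".
	if (avformat_open_input(&pFormatCtx_Audio, "audio=virtual-audio-capturer", ifmt, NULL) != 0)
		return -1;
	if (avformat_find_stream_info(pFormatCtx_Audio, NULL) < 0)
		return -1;

	// Open the decoder for the captured (usually PCM) audio stream.
	AVCodecContext *pCodecCtx_Audio = pFormatCtx_Audio->streams[0]->codec;
	AVCodec *codec = avcodec_find_decoder(pCodecCtx_Audio->codec_id);
	if (codec == NULL || avcodec_open2(pCodecCtx_Audio, codec, NULL) < 0)
		return -1;

	return 0;
}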

The OutPutThreadProc thread function:

DWORD WINAPI OutPutThreadProc(LPVOID lpParam)
{
	// Frame and buffer used to hold one raw video frame read back from the FIFO.
	AVFrame *picture = av_frame_alloc();
	int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
		pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);
	picture_buf = new uint8_t[size];

	avpicture_fill((AVPicture *)picture, picture_buf,
		pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
		pFormatCtx_Out->streams[VideoIndex]->codec->width,
		pFormatCtx_Out->streams[VideoIndex]->codec->height);



	int64_t cur_pts_v = 0, cur_pts_a = 0;
	int VideoFrameIndex = 0, AudioFrameIndex = 0;

	while (bCap)
	{
		if (flagThread && bCap)
		{
			bCap = false;
			Sleep(2000);	// crude wait: give the capture threads time to exit before the FIFOs are drained
			continue;
		}
		if (fifo_audio && fifo_video)
		{
			// Exit the loop once capture has stopped and both FIFOs have been drained.
			if (av_audio_fifo_size(fifo_audio) <= pFormatCtx_Out->streams[AudioIndex]->codec->frame_size &&
				av_fifo_size(fifo_video) <= frame_size && !bCap)
			{
				break;
			}
		}

		// Mux whichever stream is behind, comparing the timestamps written so far.
		if (av_compare_ts(cur_pts_v, pFormatCtx_Out->streams[VideoIndex]->time_base,
			cur_pts_a, pFormatCtx_Out->streams[AudioIndex]->time_base) <= 0)
		{
			// Read one raw frame's worth of data back from the video FIFO.
			if (av_fifo_size(fifo_video) < frame_size && !bCap)
			{
				cur_pts_v = 0x7fffffffffffffff;	// video drained after capture stopped: let audio finish
			}
			if (av_fifo_size(fifo_video) >= size)
			{
				EnterCriticalSection(&VideoSection);
				av_fifo_generic_read(fifo_video, picture_buf, size, NULL);
				LeaveCriticalSection(&VideoSection);

				avpicture_fill((AVPicture *)picture, picture_buf,
					pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt,
					pFormatCtx_Out->streams[VideoIndex]->codec->width,
					pFormatCtx_Out->streams[VideoIndex]->codec->height);

				//pts = n * ((1 / timebase) / fps);
				picture->pts = VideoFrameIndex * ((pFormatCtx_Video->streams[0]->time_base.den / pFormatCtx_Video->streams[0]->time_base.num) / 15);

				int got_picture = 0;
				AVPacket pkt;
				av_init_packet(&pkt);

				pkt.data = NULL;
				pkt.size = 0;
				int ret = avcodec_encode_video2(pFormatCtx_Out->streams[VideoIndex]->codec, &pkt, picture, &got_picture);
				if (ret < 0)
				{
					// encode error, drop this frame
					av_free_packet(&pkt);
					continue;
				}

				if (got_picture == 1)
				{
					pkt.stream_index = VideoIndex;
					pkt.pts = av_rescale_q_rnd(pkt.pts, pFormatCtx_Video->streams[0]->time_base,
						pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
					pkt.dts = av_rescale_q_rnd(pkt.dts, pFormatCtx_Video->streams[0]->time_base,
						pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));

					pkt.duration = ((pFormatCtx_Out->streams[0]->time_base.den / pFormatCtx_Out->streams[0]->time_base.num) / 15);

					cur_pts_v = pkt.pts;

					ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt);
					//delete[] pkt.data;
					av_free_packet(&pkt);
				}
				VideoFrameIndex++;
			}
		}
		else
		{
			if (NULL == fifo_audio)
			{
				continue;	// audio FIFO not created yet
			}
			if (av_audio_fifo_size(fifo_audio) < pFormatCtx_Out->streams[AudioIndex]->codec->frame_size && !bCap)
			{
				cur_pts_a = 0x7fffffffffffffff;	// audio drained after capture stopped: let video finish
			}
			if (av_audio_fifo_size(fifo_audio) >=
				(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024))
			{
				AVFrame *frame;
				frame = av_frame_alloc();
				frame->nb_samples = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024;
				frame->channel_layout = pFormatCtx_Out->streams[AudioIndex]->codec->channel_layout;
				frame->format = pFormatCtx_Out->streams[AudioIndex]->codec->sample_fmt;
				frame->sample_rate = pFormatCtx_Out->streams[AudioIndex]->codec->sample_rate;
				av_frame_get_buffer(frame, 0);

				EnterCriticalSection(&AudioSection);
				av_audio_fifo_read(fifo_audio, (void **)frame->data,
					(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024));
				LeaveCriticalSection(&AudioSection);

				if (pFormatCtx_Out->streams[0]->codec->sample_fmt != pFormatCtx_Audio->streams[AudioIndex]->codec->sample_fmt
					|| pFormatCtx_Out->streams[0]->codec->channels != pFormatCtx_Audio->streams[AudioIndex]->codec->channels
					|| pFormatCtx_Out->streams[0]->codec->sample_rate != pFormatCtx_Audio->streams[AudioIndex]->codec->sample_rate)
				{
					// If the input and output audio formats differed, resampling would be needed here;
					// they are identical in this project, so nothing is done.
				}

				AVPacket pkt_out;
				av_init_packet(&pkt_out);
				int got_picture = -1;
				pkt_out.data = NULL;
				pkt_out.size = 0;

				frame->pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
				if (avcodec_encode_audio2(pFormatCtx_Out->streams[AudioIndex]->codec, &pkt_out, frame, &got_picture) < 0)
				{
					printf("can not decoder a frame");
				}
				av_frame_free(&frame);
				if (got_picture)
				{
					pkt_out.stream_index = AudioIndex;
					pkt_out.pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
					pkt_out.dts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
					pkt_out.duration = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;

					cur_pts_a = pkt_out.pts;

					int ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt_out);
					av_free_packet(&pkt_out);
				}
				AudioFrameIndex++;
			}
		}
	}

	// Flush the trailer and release the output and input contexts.
	av_write_trailer(pFormatCtx_Out);

	avio_close(pFormatCtx_Out->pb);
	avformat_free_context(pFormatCtx_Out);

	if (pFormatCtx_Video != NULL)
	{
		avformat_close_input(&pFormatCtx_Video);
		pFormatCtx_Video = NULL;
	}
	if (pFormatCtx_Audio != NULL)
	{
		avformat_close_input(&pFormatCtx_Audio);
		pFormatCtx_Audio = NULL;
	}

	return 0;
}
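OutPutThreadProc assumes that pFormatCtx_Out was already created with one MPEG4 video stream (VideoIndex) and one AAC audio stream (AudioIndex), and that avformat_write_header was called before the threads start. A rough sketch of that setup with an assumed helper name OpenOutPut; the bit rates, sample rate and output file name are illustrative rather than the project's exact values:

// Sketch only: build the output context used by OutPutThreadProc.
int OpenOutPut()
{
	const char *out_name = "out.mp4";
	avformat_alloc_output_context2(&pFormatCtx_Out, NULL, NULL, out_name);

	// Video stream: MPEG4, same size as the capture, 15 fps.
	AVCodec *vcodec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
	AVStream *vst = avformat_new_stream(pFormatCtx_Out, vcodec);
	VideoIndex = vst->index;
	vst->codec->width = pCodecCtx_Video->width;
	vst->codec->height = pCodecCtx_Video->height;
	vst->codec->pix_fmt = AV_PIX_FMT_YUV420P;
	vst->codec->time_base.num = 1;
	vst->codec->time_base.den = 15;
	vst->codec->bit_rate = 4000000;
	if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
		vst->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	if (avcodec_open2(vst->codec, vcodec, NULL) < 0)
		return -1;

	// Audio stream: AAC (the native AAC encoder of that FFmpeg era is experimental).
	AVCodec *acodec = avcodec_find_encoder(AV_CODEC_ID_AAC);
	AVStream *ast = avformat_new_stream(pFormatCtx_Out, acodec);
	AudioIndex = ast->index;
	ast->codec->sample_fmt = acodec->sample_fmts ? acodec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
	ast->codec->sample_rate = 44100;
	ast->codec->channels = 2;
	ast->codec->channel_layout = AV_CH_LAYOUT_STEREO;
	ast->codec->bit_rate = 128000;
	ast->codec->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
	if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
		ast->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	if (avcodec_open2(ast->codec, acodec, NULL) < 0)
		return -1;

	if (avio_open(&pFormatCtx_Out->pb, out_name, AVIO_FLAG_WRITE) < 0)
		return -1;
	return avformat_write_header(pFormatCtx_Out, NULL);
}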
Final result:

(Screenshot 1: the recording result)

Note for running the project: AudioCapThreadProc opens the audio capture device via DirectShow, so you first need to list the local device names with an ffmpeg command.

The ffmpeg command:

ffmpeg -list_devices true -f dshow -i dummy

(Screenshot 2: ffmpeg device list output)

The audio device name shows up garbled in the console.

Fix 1: convert the garbled name from ANSI to UTF-8.
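A minimal sketch of that conversion using the Win32 code-page APIs; the helper name AnsiToUtf8 is just an example (requires <windows.h> and <string>):

// Sketch: convert an ANSI (local code page) device name to UTF-8.
std::string AnsiToUtf8(const std::string &ansi)
{
	// ANSI -> UTF-16
	int wlen = MultiByteToWideChar(CP_ACP, 0, ansi.c_str(), -1, NULL, 0);
	std::wstring wide(wlen, L'\0');
	MultiByteToWideChar(CP_ACP, 0, ansi.c_str(), -1, &wide[0], wlen);

	// UTF-16 -> UTF-8
	int ulen = WideCharToMultiByte(CP_UTF8, 0, wide.c_str(), -1, NULL, 0, NULL, NULL);
	std::string utf8(ulen, '\0');
	WideCharToMultiByte(CP_UTF8, 0, wide.c_str(), -1, &utf8[0], ulen, NULL, NULL);

	// Both conversions write a terminating '\0' into the buffer; trim it off.
	utf8.resize(strlen(utf8.c_str()));
	return utf8;
}

The converted UTF-8 string is what should be passed to avformat_open_input as the "audio=..." device name, since FFmpeg's dshow input expects UTF-8.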

Fix 2: an even simpler way to get the device name is to skip FFmpeg altogether and use GraphEdit from the DirectShow SDK (or GraphStudioNext, downloadable online) to look up the input device name.

Open graphstudionext.exe and choose "Graph -> Insert Filter".

(Screenshot 3: GraphStudioNext Insert Filter dialog)

Expand Audio Capture Sources to see the audio input device's Simplified Chinese name displayed correctly.

(Screenshot 4: Audio Capture Sources device list)

CSDN source code download: http://download.csdn.net/detail/davebobo/9492724




