Overall structure:
Thread 1: FFmpeg video decoding.
Thread 2: FFmpeg audio decoding.
Thread 3: plays the decoded audio, implemented with QAudioOutput writing into its QIODevice.
Main thread: draws each video frame, implemented in QOpenGLWidget's paintGL.
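The original post does not show how the threads are connected. As a hedged sketch, the video thread's receiveImage signal would be queued into the GUI thread roughly like this (the connect call itself is my assumption; the class, signal, and slot names come from the snippets below):

// Assumed glue code: deliver decoded frames to the GUI thread.
// FFmpegThread, MPlayer, receiveImage and updateFrame all appear below;
// the explicit Qt::QueuedConnection is the key point for cross-thread delivery.
QObject::connect(m_vlcThread, &FFmpegThread::receiveImage,
                 this,        &MPlayer::updateFrame,
                 Qt::QueuedConnection);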
Video decoding:
bool FFmpegThread::init()
{
    QByteArray ba = m_filename.toUtf8();   // keep the QByteArray alive; .data() on a temporary dangles
    const char *filepath = ba.constData();
    av_register_all();
    avformat_network_init();
    pFormatCtx = avformat_alloc_context();

    // Open the file and fill the pFormatCtx structure
    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
        qDebug("Failed to open the video file.");
        return false;
    }
    // Read stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        qDebug("Failed to read stream info.");
        return false;
    }
    videoindex = -1;
    // nb_streams is the number of streams; stop at the first video stream
    for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoindex = i;
            break;
        }
    }
    if (videoindex == -1) {
        qDebug("No video stream found.");
        return false;
    }

    // Codec context of the video stream
    pCodecCtx = pFormatCtx->streams[videoindex]->codec;
    float frameNum = pCodecCtx->framerate.num;   // frame-rate numerator
    if (frameNum > 100)                          // 30000/1001-style rates: fix up
        frameNum = frameNum / 1001;
    frameRate = 1000 / frameNum;                 // inter-frame interval in ms
    fps = av_q2d(pFormatCtx->streams[videoindex]->avg_frame_rate);
    qDebug("fps = %f, frame interval = %d ms", frameNum, frameRate);

    // Find and open the decoder
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        qDebug("Decoder not found.");
        return false;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        qDebug("Failed to open the video codec.");
        return false;
    }

    // av_frame_alloc() only allocates the frame struct;
    // the pixel buffer itself must be allocated with av_malloc
    pFrame = av_frame_alloc();
    pFrameRGB = av_frame_alloc();
    unsigned char *out_buffer = (unsigned char *)av_malloc(
        av_image_get_buffer_size(AV_PIX_FMT_RGB32, pCodecCtx->width, pCodecCtx->height, 1));
    av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, out_buffer,
                         AV_PIX_FMT_RGB32, pCodecCtx->width, pCodecCtx->height, 1);

    packet = (AVPacket *)av_malloc(sizeof(AVPacket));

    // Conversion context: decoder pixel format -> RGB32
    qDebug() << pCodecCtx->width << pCodecCtx->height;
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_RGB32,
                                     SWS_BICUBIC, NULL, NULL, NULL);
    return true;
}

void FFmpegThread::run()
{
    static int s_cnt = 0;
    if (isPlay1st) {   // first run: perform initialization
        if (!this->init()) {
            qDebug() << "Err: init FFmpeg-video fail.";
            stopped = -1;
            goto OVER;
        }
        isPlay1st = false;
        stopped = 1;
        s_cnt = 0;
    }
    while (stopped > 0) {
        if (av_read_frame(pFormatCtx, packet) < 0) {
            msleep(1);
            if (_isLoopMode) {
                // After enough consecutive failed reads, assume EOF and rewind
                if (++s_cnt > 677) {
                    s_cnt = 0;
                    qDebug() << "**** maybe-over, try once more " << s_cnt;
                    g_AudioPlayThread->reStart();
                    av_seek_frame(pFormatCtx, -1, 0, 0);
                    emit maybeFinished();
                }
            }
            continue;
        }
        s_cnt = 0;
        if (packet->stream_index == videoindex) {
            // Decode one video packet
            int ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if (ret < 0) {
                qDebug("Decode failed.");
                goto OVER;
            }
            if (got_picture) {
                sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data,
                          pFrame->linesize, 0, pCodecCtx->height,
                          pFrameRGB->data, pFrameRGB->linesize);
                QImage img((uchar*)pFrameRGB->data[0], pCodecCtx->width, pCodecCtx->height,
                           QImage::Format_RGB32);
                img = img.scaled(vedioW, vedioH);
                emit receiveImage(img.copy());

                /// A/V sync: compare this frame's timestamp with the audio clock
                double vs = pFrame->pts * av_q2d(pFormatCtx->streams[videoindex]->time_base);
                double as = g_AudioPlayThread->getcurFps();
                // Per-frame display delay
                double extra_delay = pFrame->repeat_pict / (2 * fps);
                double fps_delay = 1.0 / fps;
                double real_delay = extra_delay + fps_delay;
                double diff_time = vs - as;
                if (diff_time > 0) {
                    // video ahead of audio: wait
                    av_usleep((diff_time + real_delay) * 1000 * 1000);
                } else if (fabs(diff_time) >= 0.05) {
                    // audio ahead by more than 50 ms: drop this frame
                    av_frame_unref(pFrame);
                    av_free_packet(packet);   // release the packet when dropping
                    continue;
                }
            }
        }
        av_free_packet(packet);
    }
OVER:
    sws_freeContext(img_convert_ctx);
    av_frame_free(&pFrameRGB);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);
    stopped = 0;
    isPlay1st = true;
    s_cnt = 0;
    qDebug() << "stop ffmpeg thread";
}
Audio decoding:
bool MusicDecodecThread::openAudioFile(QString fileName)
{
    QByteArray ba = fileName.toUtf8();   // keep alive; .data() on a temporary dangles
    const char *filepath = ba.constData();
    av_register_all();
    avformat_network_init();
    pFormatCtx = avformat_alloc_context();

    // Open the file and fill the pFormatCtx structure
    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0) {
        qDebug("Failed to open the file.");
        return false;
    }
    // Read stream information
    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        qDebug("Failed to read stream info.");
        return false;
    }
    audioindex = -1;
    // Stop at the first audio stream
    for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            audioindex = i;
            break;
        }
    }
    if (audioindex == -1) {
        qDebug("No audio stream found.");
        return false;
    }

    // Codec parameters and decoder of the audio stream
    aCodecParameters = pFormatCtx->streams[audioindex]->codecpar;
    aCodec = avcodec_find_decoder(aCodecParameters->codec_id);
    if (aCodec == 0) {
        qDebug("Decoder not found.");
        return false;
    }
    aCodecCtx = avcodec_alloc_context3(aCodec);
    avcodec_parameters_to_context(aCodecCtx, aCodecParameters);
    if (avcodec_open2(aCodecCtx, aCodec, 0) < 0) {
        qDebug("Failed to open the audio codec.");
        return false;
    }

    int rate = aCodecCtx->sample_rate;
    int channel = aCodecCtx->channels;
    g_AudioPlayThread->cleanAllAudioBuffer();
    g_AudioPlayThread->setCurrentSampleInfo(rate, 16, channel);

    // av_frame_alloc() only allocates the frame struct
    pFrame = av_frame_alloc();

    // Resample everything to signed 16-bit PCM at the source rate
    uint64_t out_channel_layout = aCodecCtx->channel_layout;
    if (out_channel_layout == 0)   // some files leave the layout unset
        out_channel_layout = av_get_default_channel_layout(aCodecCtx->channels);
    out_sample_fmt = AV_SAMPLE_FMT_S16;
    int out_sample_rate = aCodecCtx->sample_rate;
    out_channels = av_get_channel_layout_nb_channels(out_channel_layout);
    audio_out_buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRAME_SIZE * 2);
    swr_ctx = swr_alloc_set_opts(NULL,
                                 out_channel_layout, out_sample_fmt, out_sample_rate,
                                 out_channel_layout, aCodecCtx->sample_fmt, aCodecCtx->sample_rate,
                                 0, 0);
    swr_init(swr_ctx);

    packet = (AVPacket *)av_malloc(sizeof(AVPacket));
    return true;
}

void MusicDecodecThread::run(void)
{
    while (1) {
        if (av_read_frame(pFormatCtx, packet) < 0) {
            msleep(1);   // EOF: the video thread handles looping and seeking
            continue;
        }
        if (packet->stream_index == audioindex) {
            int ret = avcodec_decode_audio4(aCodecCtx, pFrame, &got_audio, packet);
            if (ret < 0) {
                qDebug("Decode failed.");
                goto OVER;
            }
            if (got_audio) {
                // out_count is in samples per channel; size it to the output buffer
                int out_count = (MAX_AUDIO_FRAME_SIZE * 2) /
                                (out_channels * av_get_bytes_per_sample(out_sample_fmt));
                int len = swr_convert(swr_ctx, &audio_out_buffer, out_count,
                                      (const uint8_t **)pFrame->data, pFrame->nb_samples);
                if (len <= 0) {
                    qDebug("Resample failed.");
                    av_free_packet(packet);
                    continue;
                }
                int dst_bufsize = av_samples_get_buffer_size(0, out_channels, len,
                                                             out_sample_fmt, 1);
                QByteArray atemp = QByteArray((const char *)audio_out_buffer, dst_bufsize);
                // Hand the PCM chunk and its timestamp (seconds) to the playback thread
                g_AudioPlayThread->setAudioData(atemp,
                    pFrame->pts * av_q2d(pFormatCtx->streams[audioindex]->time_base));
            }
        }
        av_free_packet(packet);
    }
OVER:
    av_frame_free(&pFrame);
    avcodec_close(aCodecCtx);
    avformat_close_input(&pFormatCtx);
}
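Before wiring the PCM into QAudioOutput, it is worth verifying the resampled data on its own. A debugging sketch (the helper, the dump file name, and the 44100 Hz stereo values are assumptions; substitute your stream's actual rate and channel count):

// Debug sketch (assumed helper, not in the original code): append each
// converted chunk to a raw PCM file for offline inspection.
#include <cstdio>
static void dumpPcm(const uint8_t *buf, int bytes)
{
    static FILE *f = fopen("dump.pcm", "wb");
    if (f) fwrite(buf, 1, bytes, f);
}
// Call dumpPcm(audio_out_buffer, dst_bufsize) in the got_audio branch,
// then play the raw S16 little-endian data back from a shell:
//   ffplay -f s16le -ar 44100 -ac 2 dump.pcm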
Loading and playing the audio data:
void AudioPlayThread::setCurrentSampleInfo(int sampleRate, int sampleSize, int channelCount)
{
    QAudioFormat fmt;
    fmt.setSampleRate(sampleRate);
    fmt.setSampleSize(sampleSize);
    fmt.setChannelCount(channelCount);
    fmt.setCodec("audio/pcm");
    fmt.setByteOrder(QAudioFormat::LittleEndian);
    fmt.setSampleType(QAudioFormat::SignedInt);
    m_OutPut = new QAudioOutput(fmt);
    m_AudioIo = m_OutPut->start();   // push mode: we write PCM into this QIODevice
    qDebug() << "audio:" << "Rate:" << sampleRate << "Size:" << sampleSize
             << "channelCount:" << channelCount << m_AudioIo;
}

void AudioPlayThread::run(void)
{
    while (!this->isInterruptionRequested()) {
        if (!m_IsPlaying) {
            QThread::msleep(10);
            continue;
        }
        if (dataList.size() <= _index) {   // no new chunk decoded yet
            QThread::msleep(10);
            m_IsPlaying = false;
            continue;
        }
        /// A/V sync: only write when the device can take a full period
        if (m_OutPut->bytesFree() >= m_OutPut->periodSize()) {
            int size = m_OutPut->periodSize();
            // Pull decoded chunks until one period is buffered, remembering
            // the timestamp of the chunk currently being consumed
            while (m_PCMDataBuffer.size() - m_CurrentPlayIndex < size) {
                if (dataList.size() > _index) {
                    m_curFps = fpsList[_index];
                    m_PCMDataBuffer.append(dataList[_index++]);
                } else {
                    // underrun: only the remaining bytes are available
                    size = m_PCMDataBuffer.size() - m_CurrentPlayIndex;
                    break;
                }
            }
            m_AudioIo->write(&m_PCMDataBuffer.data()[m_CurrentPlayIndex], size);
            m_CurrentPlayIndex += size;
        }
    }
}

void AudioPlayThread::cleanAllAudioBuffer(void)
{
    m_CurrentPlayIndex = 0;
    dataList.clear();
    fpsList.clear();
    m_PCMDataBuffer.clear();
    m_IsPlaying = false;
    _index = 0;
}

void AudioPlayThread::playMusic(bool status)
{
    m_IsPlaying = status;
}

bool AudioPlayThread::getPlayMusicStatus(void)
{
    return m_IsPlaying;
}

// Called from the decode thread: store one PCM chunk and its timestamp.
// Note: dataList/fpsList are shared between threads; a production player
// should guard them with a QMutex.
void AudioPlayThread::setAudioData(QByteArray data, double fps)
{
    dataList.append(data);
    fpsList.append(fps);
    m_IsPlaying = true;
}

// Timestamp (in seconds) of the audio currently being played: the "audio clock"
double AudioPlayThread::getcurFps()
{
    return m_curFps;
}

// Reset for loop playback
void AudioPlayThread::reStart()
{
    m_IsPlaying = true;
    m_curFps = 0;
    _index = 0;
    m_CurrentPlayIndex = 0;
    m_PCMDataBuffer.clear();
}
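One caveat about setCurrentSampleInfo(): if the device cannot handle the requested format, QAudioOutput will not play correctly. In Qt 5 it is worth checking the default output device first; a minimal sketch, assuming the same fmt as above:

// Sketch (Qt 5): verify the format before constructing the output.
#include <QAudioDeviceInfo>
#include <QAudioOutput>

QAudioDeviceInfo info = QAudioDeviceInfo::defaultOutputDevice();
if (!info.isFormatSupported(fmt)) {
    qDebug() << "Requested format unsupported, falling back to nearest.";
    fmt = info.nearestFormat(fmt);   // rate/size may change; resample to match
}
m_OutPut = new QAudioOutput(info, fmt);
m_AudioIo = m_OutPut->start();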
Loading each frame and playing the video:
void MPlayer::loadInfo()
{
    m_vlcThread->setLoopPlaypack();     // enable loop playback
    m_audioThread->setLoopPlaypack();   // enable loop playback
    m_vlcThread->loadInfo(m_fileName, 720, 1280);
    m_audioThread->openAudioFile(m_fileName);
    m_vlcThread->start();
    m_audioThread->start();
    emit sig_Play(true);
}

void MPlayer::updateFrame(const QImage &image)
{
    if (!this->isFullScreen())
        this->showFullScreen();
    m_img = image;
    update();   // schedule a repaint; paintGL() draws m_img
}

void MPlayer::paintGL()
{
    QPainter p(this);
    p.drawImage(this->rect(), m_img);
}
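A note on paintGL(): drawImage(this->rect(), m_img) stretches the frame to fill the widget, distorting the picture when the aspect ratios differ. A minimal letterboxing variant (a sketch using only QPainter/QImage, as a drop-in replacement for the method above):

void MPlayer::paintGL()
{
    QPainter p(this);
    p.fillRect(this->rect(), Qt::black);   // letterbox bars
    // Scale the frame to fit while keeping its aspect ratio, centered
    QSize s = m_img.size().scaled(this->size(), Qt::KeepAspectRatio);
    QRect target(QPoint((width() - s.width()) / 2, (height() - s.height()) / 2), s);
    p.drawImage(target, m_img);
}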
Calling it from main():
int main(int argc, char *argv[])
{
    QApplication a(argc, argv);
    QString url;
    if (argc > 1) {
        url = argv[1];
        qDebug() << argc << argv[0] << argv[1];
    }
    MPlayer w;
    w.setUrl(url);
    w.loadInfo();
    w.show();
    return a.exec();
}
Playing audio and video with FFmpeg is easy in itself: there are several ways to do it, and a single class is enough to play both. The audio/video synchronization logic, however, is considerably more involved.
The sync logic used here: audio playback is paced by the sound device, while a video loop with no delay decodes far faster than real time, so left alone the video would race ahead of the audio. Synchronization therefore has to be enforced in the video decoding thread. Three worker threads are used. The audio decode thread runs without any delay, so while the video is still playing it decodes the entire audio stream, appending each PCM chunk and that chunk's presentation timestamp to a pair of lists. The audio playback thread then pulls one periodSize worth of data at a time from those lists, records the timestamp of the chunk it is currently consuming, and writes the data to the output device; this is how we always know which point in the stream the audio has reached (the audio clock returned by getcurFps(), which is a timestamp in seconds despite its name). Finally, the video decode thread fetches that audio clock, compares it with each frame's own timestamp, and sleeps or drops frames to stay in step.
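One refinement worth noting: getcurFps() returns the timestamp of the chunk most recently pulled off the list, which runs ahead of what the speaker is actually emitting by however much PCM still sits in the device buffer. A common correction (ffplay does something similar) subtracts that buffered audio; a sketch, assuming the S16 format configured above:

// Sketch of a more accurate audio clock (an assumed refinement, not in
// the original code): subtract the PCM still queued in the output device.
double AudioPlayThread::getcurFps()
{
    int bufferedBytes = m_OutPut->bufferSize() - m_OutPut->bytesFree();
    // bytes per second = sampleRate * channels * 2 (S16 is 2 bytes/sample)
    double byteRate = m_OutPut->format().sampleRate()
                    * m_OutPut->format().channelCount() * 2.0;
    return m_curFps - bufferedBytes / byteRate;
}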