  • ffmpeg -fflags nobuffer -f dshow -i audio="麦克风 (2- Realtek High Definition Audio)" -acodec aac -f flv rtmp://localhost:1935/live/test1
    
  • Recording local microphone audio and pushing it over UDP, then pulling the stream for audio playback

         FFmpeg + Qt: record audio from the local microphone and push it as a UDP multicast stream, then pull from the multicast address and play the audio back. The push side adds an optional noise-injection control, so noise interference can be heard when the pulled stream is played.

    Preview


    Video source

    Code
    /*
    Push side:
    	Audio capture on the push side uses Qt's audio interface.
    */
    
    #define QMSG(str) 
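    The snippet is cut off here, so below is a minimal sketch of the capture side the excerpt describes: pulling raw PCM from the microphone through Qt's audio interface. It assumes Qt 5's QAudioInput; the function name and format parameters are illustrative, not taken from the original project.

    #include <QAudioInput>
    #include <QAudioFormat>
    #include <QIODevice>
    
    //Open the default microphone in pull mode and hand back a QIODevice
    //from which raw PCM can be read and fed to the UDP push side.
    QAudioInput *startMicCapture(QIODevice *&pcmSource)
    {
    	QAudioFormat fmt;                 //capture parameters are assumptions
    	fmt.setSampleRate(44100);
    	fmt.setChannelCount(2);
    	fmt.setSampleSize(16);
    	fmt.setCodec("audio/pcm");
    	fmt.setByteOrder(QAudioFormat::LittleEndian);
    	fmt.setSampleType(QAudioFormat::SignedInt);
    
    	QAudioInput *input = new QAudioInput(fmt);
    	pcmSource = input->start();       //read() PCM chunks from this device
    	return input;                     //caller owns both objects
    }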
  • ffmpeg: capturing the camera and microphone, and pushing the stream

    A simple live audio/video application: ffmpeg reads the camera and microphone and pushes the result over RTMP.

    Based on code found online, I got live audio/video streaming working.

    Some problems remain: latency, and the picture and sound drifting out of sync.

    References:

    https://blog.csdn.net/n_fly/article/details/90899094  (my earlier attempt sent neither sound nor picture; understanding this post was the key to solving that)

    https://blog.csdn.net/n_fly/article/details/90899094  (the code this blogger uploaded differs from what is pasted in the post, but it gave me the confidence to keep digging)

    Finally, I hope people will share more of their own learning experience; solving this took me almost a month, and in the end I still had to pay to get the source code. Knowledge costs money, and CSDN charges too much.

    /*
    from: tanbo - jlrmyy
    Captures the camera and records the microphone, then pushes the stream via nginx.
    To be switched to the ffmpeg 4.0 API later.
    Known problems:
    1. Latency
    2. Picture and sound out of sync
    */
    
    #include "stdafx.h"
    
    #ifdef	__cplusplus
    extern "C"
    {
    #endif
    #include "libavcodec/avcodec.h"
    #include "libavformat/avformat.h"
    #include "libswscale/swscale.h"
    #include "libavdevice/avdevice.h"
    #include "libavutil/audio_fifo.h"
    #include "libswresample/swresample.h"
    
    #pragma comment(lib, "avcodec.lib")
    #pragma comment(lib, "avformat.lib")
    #pragma comment(lib, "avutil.lib")
    #pragma comment(lib, "avdevice.lib")
    #pragma comment(lib, "avfilter.lib")
    
    //#pragma comment(lib, "avfilter.lib")
    //#pragma comment(lib, "postproc.lib")
    #pragma comment(lib, "swresample.lib")
    #pragma comment(lib, "swscale.lib")
    #ifdef __cplusplus
    };
    #endif
    
    AVFormatContext	*pFormatCtx_Video = NULL, *pFormatCtx_Audio = NULL, *pFormatCtx_Out = NULL;
    AVCodecContext	*pCodecCtx_Video;
    AVCodec			*pCodec_Video;
    AVFifoBuffer	*fifo_video = NULL;
    AVAudioFifo		*fifo_audio = NULL;
    int VideoIndex, AudioIndex;
    
    CRITICAL_SECTION AudioSection, VideoSection;
    
    
    
    SwsContext *img_convert_ctx;
    struct SwrContext *au_convert_ctx;
    int frame_size = 0;
    
    
    uint8_t *picture_buf = NULL, *frame_buf = NULL;
    
    bool bCap = true;
    
    DWORD WINAPI ScreenCapThreadProc( LPVOID lpParam );
    DWORD WINAPI AudioCapThreadProc( LPVOID lpParam );
    static char *dup_wchar_to_utf8(wchar_t *w)
    {
    	char *s = NULL;
    	int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
    	s = (char *)av_malloc(l);
    	if (s)
    		WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
    	return s;
    }
    int OpenVideoCapture()
    {
    	AVInputFormat *ifmt = av_find_input_format("dshow");
    
    	//Set own video device's name
    	char * psCameraName = dup_wchar_to_utf8(L"video=USB2.0 PC CAMERA");
    
    	//AVInputFormat *ifmt=av_find_input_format("gdigrab");
    	//Options can be passed when opening, e.g. to set the capture frame rate
    	AVDictionary *options = NULL;
    	av_dict_set(&options, "rtbufsize", "30412800", 0);//default size is 3041280
    	//av_dict_set(&options, "framerate", "15", 0);
    	//av_dict_set(&options,"offset_x","20",0);
    	//The distance from the top edge of the screen or desktop
    	//av_dict_set(&options,"offset_y","40",0);
    	//Video frame size. The default is to capture the full screen
    	//av_dict_set(&options,"video_size","320x240",0);
    	//if(avformat_open_input(&pFormatCtx_Video, "desktop", ifmt, &options)!=0)
    	if (avformat_open_input(&pFormatCtx_Video, psCameraName, ifmt, &options) != 0)
    	{
    		printf("Couldn't open input stream.(无法打开视频输入流)\n");
    		return -1;
    	}
    	if(avformat_find_stream_info(pFormatCtx_Video,NULL)<0)
    	{
    		printf("Couldn't find stream information.(无法获取视频流信息)\n");
    		return -1;
    	}
    	if (pFormatCtx_Video->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO)
    	{
    		printf("Couldn't find video stream information.(无法获取视频流信息)\n");
    		return -1;
    	}
    	pCodecCtx_Video = pFormatCtx_Video->streams[0]->codec;
    	pCodec_Video = avcodec_find_decoder(pCodecCtx_Video->codec_id);
    	if(pCodec_Video == NULL)
    	{
    		printf("Codec not found.(没有找到解码器)\n");
    		return -1;
    	}
    	if(avcodec_open2(pCodecCtx_Video, pCodec_Video, NULL) < 0)
    	{
    		printf("Could not open codec.(无法打开解码器)\n");
    		return -1;
    	}
    
    	
    
    	img_convert_ctx = sws_getContext(pCodecCtx_Video->width, pCodecCtx_Video->height, pCodecCtx_Video->pix_fmt, 
    		pCodecCtx_Video->width, pCodecCtx_Video->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 
    
    	frame_size = avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height);
    	//allocate a 30-frame cache (frames are stored as YUV420P after sws_scale)
    	fifo_video = av_fifo_alloc(30 * avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx_Video->width, pCodecCtx_Video->height));
    
    	return 0;
    }
    
    int OpenAudioCapture()
    {
    	//look up the input format
    	AVInputFormat *pAudioInputFmt = av_find_input_format("dshow");
    	
    	//open the device via DirectShow and tie the input format to the format context
    	char * psDevName = dup_wchar_to_utf8(L"audio=麦克风 (Realtek High Definition Au");
    
    	if (avformat_open_input(&pFormatCtx_Audio, psDevName, pAudioInputFmt,NULL) < 0)
    	{
    		printf("Couldn't open input stream.(无法打开音频输入流)\n");
    		return -1;
    	}
    
    	if(avformat_find_stream_info(pFormatCtx_Audio,NULL)<0)  
    		return -1; 
    	
    	if(pFormatCtx_Audio->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
    	{
    		printf("Couldn't find video stream information.(无法获取音频流信息)\n");
    		return -1;
    	}
    
    	AVCodec *tmpCodec = avcodec_find_decoder(pFormatCtx_Audio->streams[0]->codec->codec_id);
    	if(0 > avcodec_open2(pFormatCtx_Audio->streams[0]->codec, tmpCodec, NULL))
    	{
    		printf("can not find or open audio decoder!\n");
    	}
    	//Swr
    
    	
    
    	return 0;
    }
    
    int OpenOutPut()
    {
    	AVStream *pVideoStream = NULL, *pAudioStream = NULL;
    	const char *outFileName = "rtmp://localhost:1935/live/room";
    	avformat_alloc_output_context2(&pFormatCtx_Out, NULL, "flv", outFileName);
    	//const char *outFileName = "d:/test.flv";
    	//avformat_alloc_output_context2(&pFormatCtx_Out, NULL, NULL, outFileName);
    
    	if (pFormatCtx_Video->streams[0]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
    	{
    		AVCodecContext *videoCodecCtx;
    		VideoIndex = 0;
    		pVideoStream = avformat_new_stream(pFormatCtx_Out, NULL);
    
    		if (!pVideoStream)
    		{
    			printf("can not new stream for output!\n");
    			return -1;
    		}
    
    		//set codec context param
    		pVideoStream->codec->codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    		pVideoStream->codec->height = pFormatCtx_Video->streams[0]->codec->height;
    		pVideoStream->codec->width = pFormatCtx_Video->streams[0]->codec->width;
    		
    		pVideoStream->codec->me_range = 16;
    		pVideoStream->codec->max_qdiff = 4;
    		pVideoStream->codec->qmin = 10;
    		pVideoStream->codec->qmax = 51;
    		pVideoStream->codec->qcompress = 0.6;
    
    		pVideoStream->codec->time_base = pFormatCtx_Video->streams[0]->codec->time_base;
    		pVideoStream->codec->sample_aspect_ratio = pFormatCtx_Video->streams[0]->codec->sample_aspect_ratio;
    		// take first format from list of supported formats
    		pVideoStream->codec->pix_fmt = pFormatCtx_Out->streams[VideoIndex]->codec->codec->pix_fmts[0];
    
    		//open encoder
    		if (!pVideoStream->codec->codec)
    		{
    			printf("can not find the encoder!\n");
    			return -1;
    		}
    
    		if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)
    			pVideoStream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    
    		if ((avcodec_open2(pVideoStream->codec, pVideoStream->codec->codec, NULL)) < 0)
    		{
    			printf("can not open the encoder\n");
    			return -1;
    		}
    	}
    
    	if(pFormatCtx_Audio->streams[0]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
    	{
    		AVCodecContext *pOutputCodecCtx;
    		AudioIndex = 1;
    		pAudioStream = avformat_new_stream(pFormatCtx_Out, NULL);
    
    		//pAudioStream->codec->codec = avcodec_find_encoder(pFormatCtx_Out->oformat->audio_codec);
    		pAudioStream->codec->codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    		pOutputCodecCtx = pAudioStream->codec;
    
    		pOutputCodecCtx->sample_rate = pFormatCtx_Audio->streams[0]->codec->sample_rate;
    		pOutputCodecCtx->channel_layout = pFormatCtx_Audio->streams[0]->codec->channel_layout;
    		pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pAudioStream->codec->channel_layout);
    		if(pOutputCodecCtx->channel_layout == 0)
    		{
    			pOutputCodecCtx->channel_layout = AV_CH_LAYOUT_STEREO;
    			pOutputCodecCtx->channels = av_get_channel_layout_nb_channels(pOutputCodecCtx->channel_layout);
    
    		}
    		//pOutputCodecCtx->sample_fmt = AV_SAMPLE_FMT_S16P;
    		pOutputCodecCtx->sample_fmt = pAudioStream->codec->codec->sample_fmts[0];
    		
    		AVRational time_base={1, pAudioStream->codec->sample_rate};
    		pAudioStream->time_base = time_base;
    
    		pOutputCodecCtx->codec_tag = 0;  
    		if (pFormatCtx_Out->oformat->flags & AVFMT_GLOBALHEADER)  
    			pOutputCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
    
    		if (avcodec_open2(pOutputCodecCtx, pOutputCodecCtx->codec, 0) < 0)
    		{
    			//failed to open the encoder; bail out
    			return -1;
    		}
    	}
    
    	if (!(pFormatCtx_Out->oformat->flags & AVFMT_NOFILE))
    	{
    		if(avio_open(&pFormatCtx_Out->pb, outFileName, AVIO_FLAG_WRITE) < 0)
    		{
    			printf("can not open output file handle!\n");
    			return -1;
    		}
    	}
    
    	if(avformat_write_header(pFormatCtx_Out, NULL) < 0)
    	{
    		printf("can not write the header of the output file!\n");
    		return -1;
    	}
    
    	return 0;
    }
    
    int _tmain(int argc, _TCHAR* argv[])
    {
    	av_register_all();
    	avdevice_register_all();
    	avformat_network_init();
    	if (OpenVideoCapture() < 0)
    	{
    		return -1;
    	}
    	if (OpenAudioCapture() < 0)
    	{
    		return -1;
    	}
    	if (OpenOutPut() < 0)
    	{
    		return -1;
    	}
    	
    	InitializeCriticalSection(&VideoSection);
    	InitializeCriticalSection(&AudioSection);
    
    	AVFrame *picture = av_frame_alloc();
    	int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
    		pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);
    	picture_buf = new uint8_t[size];
    
    	avpicture_fill((AVPicture *)picture, picture_buf, 
    		pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
    		pFormatCtx_Out->streams[VideoIndex]->codec->width, 
    		pFormatCtx_Out->streams[VideoIndex]->codec->height);
    
    	
    	
    
    
    	//star cap screen thread
    	CreateThread( NULL, 0, ScreenCapThreadProc, 0, 0, NULL);
    	//star cap audio thread
    	CreateThread( NULL, 0, AudioCapThreadProc, 0, 0, NULL);
    	int64_t cur_pts_v=0,cur_pts_a=0;
    	int VideoFrameIndex = 0, AudioFrameIndex = 0;
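    	//Mux loop: av_compare_ts() compares the last written video and audio
    	//PTS in their own stream time bases and services whichever stream is
    	//behind, so packets reach av_interleaved_write_frame() in rough DTS
    	//order. Once capture stops (bCap == false) and a fifo drains below one
    	//frame, that stream's PTS is pinned to INT64_MAX so the other drains.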
    
    	while(1)
    	{
    		if (_kbhit() != 0 && bCap)
    		{
    			bCap = false;
    			Sleep(2000);//crude: sleep so the capture threads have time to stop
    		}
    		if (fifo_audio && fifo_video)
    		{
    			int sizeAudio = av_audio_fifo_size(fifo_audio);
    			int sizeVideo = av_fifo_size(fifo_video);
    			//once the buffered data has been drained, leave the loop
    			if (av_audio_fifo_size(fifo_audio) <= pFormatCtx_Out->streams[AudioIndex]->codec->frame_size && 
    				av_fifo_size(fifo_video) <= frame_size && !bCap)
    			{
    				break;
    			}
    		}
    
    		if(av_compare_ts(cur_pts_v, pFormatCtx_Out->streams[VideoIndex]->time_base, 
    			cur_pts_a,pFormatCtx_Out->streams[AudioIndex]->time_base) <= 0)
    		{
    			//read data from fifo
    			if (av_fifo_size(fifo_video) < frame_size && !bCap)
    			{
    				cur_pts_v = 0x7fffffffffffffff;
    			}
    			if(av_fifo_size(fifo_video) >= size)
    			{
    				EnterCriticalSection(&VideoSection);
    				av_fifo_generic_read(fifo_video, picture_buf, size, NULL);
    				LeaveCriticalSection(&VideoSection);
    				
    				avpicture_fill((AVPicture *)picture, picture_buf, 
    					pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
    					pFormatCtx_Out->streams[VideoIndex]->codec->width, 
    					pFormatCtx_Out->streams[VideoIndex]->codec->height);
    				
    				//pts = n * ((1 / timebase) / fps);
    				//picture->pts = VideoFrameIndex *AV_TIME_BASE / av_q2d(pFormatCtx_Video->streams[0]->r_frame_rate);
    				picture->pts = VideoFrameIndex * ((pFormatCtx_Video->streams[0]->time_base.den / pFormatCtx_Video->streams[0]->time_base.num) / 15);
    				int got_picture = 0;
    				AVPacket pkt;
    				av_init_packet(&pkt);
    				
    				pkt.data = NULL;
    				pkt.size = 0;
    				int ret = avcodec_encode_video2(pFormatCtx_Out->streams[VideoIndex]->codec, &pkt, picture, &got_picture);
    				if(ret < 0)
    				{
    					//encode error; drop this frame
    					continue;
    				}
    				
    				if (got_picture==1)
    				{
     					pkt.stream_index = VideoIndex;
    					pkt.pts = av_rescale_q_rnd(picture->pts , pFormatCtx_Video->streams[0]->time_base,
    						pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));  
    					pkt.dts = av_rescale_q_rnd(pkt.dts, pFormatCtx_Video->streams[0]->time_base,
    						pFormatCtx_Out->streams[VideoIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    
    					pkt.duration = ((pFormatCtx_Out->streams[0]->time_base.den / pFormatCtx_Out->streams[0]->time_base.num) / 15);
    
    					cur_pts_v = pkt.pts;
    
    					ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt);
    					//delete[] pkt.data;
    					av_free_packet(&pkt);
    					VideoFrameIndex++;
    				}
    				
    			}
    		}
    		else
    		{
    			if (NULL == fifo_audio)
    			{
    				continue;//fifo not initialized yet
    			}
    			if (av_audio_fifo_size(fifo_audio) < pFormatCtx_Out->streams[AudioIndex]->codec->frame_size && !bCap)
    			{
    				cur_pts_a = 0x7fffffffffffffff;
    			}
    			if(av_audio_fifo_size(fifo_audio) >= 
    				(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024))
    			{
    				AVFrame *frame;
    				frame = av_frame_alloc();
    				frame->nb_samples = pFormatCtx_Out->streams[AudioIndex]->codec->frame_size>0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size: 1024;
    				frame->channel_layout = pFormatCtx_Out->streams[AudioIndex]->codec->channel_layout;
    				frame->format = pFormatCtx_Out->streams[AudioIndex]->codec->sample_fmt;
    				frame->sample_rate = pFormatCtx_Out->streams[AudioIndex]->codec->sample_rate;
    				av_frame_get_buffer(frame, 0);
    
    				EnterCriticalSection(&AudioSection);
    				av_audio_fifo_read(fifo_audio, (void **)frame->data, 
    					(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size > 0 ? pFormatCtx_Out->streams[AudioIndex]->codec->frame_size : 1024));
    				LeaveCriticalSection(&AudioSection);
    
    				int audio_framesize = frame->nb_samples;
    
    				AVPacket pkt_out;
    				av_init_packet(&pkt_out);
    				int got_picture = -1;
    				pkt_out.data = NULL;
    				pkt_out.size = 0;
    
    				frame->pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
    				if (avcodec_encode_audio2(pFormatCtx_Out->streams[AudioIndex]->codec, &pkt_out, frame, &got_picture) < 0)
    				{
    					printf("can not decoder a frame");
    				}
    				av_frame_free(&frame);
    				if (got_picture) 
    				{
    					//AVRational time_base = pFormatCtx_Out->streams[AudioIndex]->time_base;//{ 1, 1000 };
    					//AVRational time_base_q = { 1, AV_TIME_BASE };
    					//pkt_out.pts = av_rescale_q(AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size, time_base_q, time_base);
    					//pkt_out.dts = pkt_out.pts;
    					//pkt_out.duration = av_rescale_q(pFormatCtx_Out->streams[AudioIndex]->codec->frame_size, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
    
    
    
    					pkt_out.stream_index = AudioIndex;
    					//pkt_out.pts = AudioFrameIndex * pFormatCtx_Out->streams[AudioIndex]->codec->frame_size;
    					pkt_out.pts = av_rescale_q_rnd(AudioFrameIndex * audio_framesize, pFormatCtx_Out->streams[AudioIndex]->codec->time_base,
    						pFormatCtx_Out->streams[AudioIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    					pkt_out.dts = pkt_out.pts;
    					pkt_out.duration = av_rescale_q_rnd(audio_framesize, pFormatCtx_Out->streams[AudioIndex]->codec->time_base,
    						pFormatCtx_Out->streams[AudioIndex]->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
    					cur_pts_a = pkt_out.pts;
    					
    					int ret = av_interleaved_write_frame(pFormatCtx_Out, &pkt_out);
    					av_free_packet(&pkt_out);
    					AudioFrameIndex++;
    				}
    				
    			}
    		}
    	}
    
    	delete[] picture_buf;
    
    	av_fifo_free(fifo_video);
    	av_audio_fifo_free(fifo_audio);
    
    	av_write_trailer(pFormatCtx_Out);
    
    	avio_close(pFormatCtx_Out->pb);
    	avformat_free_context(pFormatCtx_Out);
    
    	if (pFormatCtx_Video != NULL)
    	{
    		avformat_close_input(&pFormatCtx_Video);
    		pFormatCtx_Video = NULL;
    	}
    	if (pFormatCtx_Audio != NULL)
    	{
    		avformat_close_input(&pFormatCtx_Audio);
    		pFormatCtx_Audio = NULL;
    	}
    
    	return 0;
    }
    
    DWORD WINAPI ScreenCapThreadProc( LPVOID lpParam )
    {
    	AVPacket packet;/* = (AVPacket *)av_malloc(sizeof(AVPacket))*/
    	int got_picture;
    	AVFrame	*pFrame;
    	pFrame = av_frame_alloc();
    
    	AVFrame *picture = av_frame_alloc();
    	int size = avpicture_get_size(pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
    		pFormatCtx_Out->streams[VideoIndex]->codec->width, pFormatCtx_Out->streams[VideoIndex]->codec->height);
    	//picture_buf = new uint8_t[size];
    
    	avpicture_fill((AVPicture *)picture, picture_buf, 
    		pFormatCtx_Out->streams[VideoIndex]->codec->pix_fmt, 
    		pFormatCtx_Out->streams[VideoIndex]->codec->width, 
    		pFormatCtx_Out->streams[VideoIndex]->codec->height);
    
    	av_init_packet(&packet);
    	int height = pFormatCtx_Out->streams[VideoIndex]->codec->height;
    	int width = pFormatCtx_Out->streams[VideoIndex]->codec->width;
    	int y_size=height*width;
    	while(bCap)
    	{
    		packet.data = NULL;
    		packet.size = 0;
    		if (av_read_frame(pFormatCtx_Video, &packet) < 0)
    		{
    			continue;
    		}
    		if(packet.stream_index == 0)
    		{
    			if (avcodec_decode_video2(pCodecCtx_Video, pFrame, &got_picture, &packet) < 0)
    			{
    				printf("Decode Error.(解码错误)\n");
    				continue;
    			}
    			if (got_picture)
    			{
    				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, 
    					pFormatCtx_Out->streams[VideoIndex]->codec->height, picture->data, picture->linesize);
    				
    				if (av_fifo_space(fifo_video) >= size)
    				{
    					EnterCriticalSection(&VideoSection);					
    					av_fifo_generic_write(fifo_video, picture->data[0], y_size, NULL);
    					av_fifo_generic_write(fifo_video, picture->data[1], y_size/4, NULL);
    					av_fifo_generic_write(fifo_video, picture->data[2], y_size/4, NULL);
    					LeaveCriticalSection(&VideoSection);
    				}
    			}
    		}
    		av_free_packet(&packet);
    		//Sleep(50);
    	}
    	av_frame_free(&pFrame);
    	av_frame_free(&picture);
    	//delete[] picture_buf;
    	return 0;
    }
    
    DWORD WINAPI AudioCapThreadProc( LPVOID lpParam )
    {
    	AVPacket pkt;
    	AVFrame *frame;
    	frame = av_frame_alloc();
    	int gotframe;
    	while(bCap)
    	{
    		pkt.data = NULL;
    		pkt.size = 0;
    		if(av_read_frame(pFormatCtx_Audio,&pkt) < 0)
    		{
    			continue;
    		}
    		
    		if (avcodec_decode_audio4(pFormatCtx_Audio->streams[0]->codec, frame, &gotframe, &pkt) < 0)
    		{
    			av_frame_free(&frame);
    			printf("can not decoder a frame");
    			break;
    		}
    		av_free_packet(&pkt);
    
    		if (!gotframe)
    		{
    			continue;//no frame produced; move on to the next packet
    		}
    		if (NULL == fifo_audio)
    		{
    			fifo_audio = av_audio_fifo_alloc(pFormatCtx_Audio->streams[0]->codec->sample_fmt,
    				pFormatCtx_Audio->streams[0]->codec->channels, 30 * frame->nb_samples);
    		}
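    		//Decide whether resampling is needed: if the capture side differs from
    		//the encoder in sample format, channel count, or sample rate, convert
    		//through libswresample before queueing; otherwise the decoded samples
    		//can go into the fifo unchanged.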
    
    		if (pFormatCtx_Out->streams[AudioIndex]->codec->sample_fmt != pFormatCtx_Audio->streams[0]->codec->sample_fmt
    			|| pFormatCtx_Out->streams[AudioIndex]->codec->channels != pFormatCtx_Audio->streams[0]->codec->channels
    			|| pFormatCtx_Out->streams[AudioIndex]->codec->sample_rate != pFormatCtx_Audio->streams[0]->codec->sample_rate)
    		{
    			if (frame->channels > 0 && frame->channel_layout == 0)
    				frame->channel_layout = av_get_default_channel_layout(frame->channels);
    			else if (frame->channels == 0 && frame->channel_layout > 0)
    				frame->channels = av_get_channel_layout_nb_channels(frame->channel_layout);
    
    			au_convert_ctx = swr_alloc_set_opts(NULL,
    				pFormatCtx_Out->streams[AudioIndex]->codec->channel_layout, pFormatCtx_Out->streams[AudioIndex]->codec->sample_fmt, pFormatCtx_Out->streams[AudioIndex]->codec->sample_rate,
    				frame->channel_layout, pFormatCtx_Audio->streams[0]->codec->sample_fmt, pFormatCtx_Audio->streams[0]->codec->sample_rate,
    				0,
    				NULL);//output parameters come first, then input parameters
    
    			int ret = swr_init(au_convert_ctx);
    
    			//if the input and output sample rates differed, resampling would need a rescale here; they are the same, so none is done
    			int64_t dst_nb_samples = av_rescale_rnd(
    				swr_get_delay(au_convert_ctx, frame->sample_rate) + frame->nb_samples,
    				frame->sample_rate,
    				frame->sample_rate,
    				AV_ROUND_UP);
    			//allocate enough buffer space for the converted samples
    			//int upperBoundSamples = swr_get_out_samples(au_convert_ctx, frame->nb_samples * frame->channels);
    			//short* pBuffer = new short[upperBoundSamples];
    
    			uint8_t **converted_samples;
    			converted_samples = (uint8_t **)calloc(pFormatCtx_Out->streams[AudioIndex]->codec->channels,
    				sizeof(*converted_samples));
    			av_samples_alloc(converted_samples, NULL,
    				pFormatCtx_Out->streams[AudioIndex]->codec->channels,
    				frame->nb_samples,
    				pFormatCtx_Out->streams[AudioIndex]->codec->sample_fmt, 0);
    			//swr_convert(au_convert_ctx, pFrameMP3->data, pFrameMP3->nb_samples, (const uint8_t**)m_ain, pFrame_audio->nb_samples);
    			int nb = swr_convert(au_convert_ctx,
    				converted_samples, dst_nb_samples,
    				(const uint8_t**)frame->extended_data, frame->nb_samples);
    
    			int data_size = frame->channels * nb * av_get_bytes_per_sample(pFormatCtx_Out->streams[AudioIndex]->codec->sample_fmt);
    
    			EnterCriticalSection(&AudioSection);
    			av_audio_fifo_realloc(fifo_audio, av_audio_fifo_size(fifo_audio) + dst_nb_samples);
    			av_audio_fifo_write(fifo_audio, (void **)converted_samples, dst_nb_samples);
    			LeaveCriticalSection(&AudioSection);
    			av_freep(&converted_samples[0]);
    			free(converted_samples);
    			swr_free(&au_convert_ctx);//the context is rebuilt on every frame, so free it here
    		}
    		else
    		{
    			if (av_audio_fifo_space(fifo_audio) >= frame->nb_samples)
    			{
    				EnterCriticalSection(&AudioSection);
    				av_audio_fifo_write(fifo_audio, (void **)frame->data, frame->nb_samples);
    				LeaveCriticalSection(&AudioSection);
    			}
    		}
    
    		
    	}
    	av_frame_free(&frame);
    	return 0;
    }

     

  • For a newcomer, ffmpeg is genuinely hard to use and full of pitfalls. Most write-ups on streaming from a microphone device are for Windows; material for Ubuntu is scarce and mostly unusable there. For example, the Windows command to push the microphone is:

    ffmpeg -f dshow -i audio="麦克风 (AUDIO 2.0)" -f flv rtmp://192.168.1.***/mylive/test

    Here 麦克风 (AUDIO 2.0) is the microphone's device name from Device Manager. Switch to Ubuntu, though, and ffmpeg complains that dshow cannot be found, which is maddening. Fortunately someone in a group chat pointed me to this link, which led me to the solution:

    https://trac.ffmpeg.org/wiki/Capture/ALSA

    The command line that worked is:

    ffmpeg -f alsa -i default -f flv rtmp://192.168.1.***/mylive/test

    On Linux, alsa serves as the input format. For -i I ended up using default, which picked up my USB microphone; that is not guaranteed, and it may be some other device name such as hw:0. The next -f is the output format: flv, because we are pushing over RTMP (every other format I tried failed to stream). Last comes the RTMP URL, which points at the RTMP server.
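    For completeness, the same ALSA capture can also be opened through the FFmpeg API instead of the command line. This is only a sketch under the old API used elsewhere on this page; the device name and error handling are illustrative:

    extern "C"
    {
    #include "libavdevice/avdevice.h"
    #include "libavformat/avformat.h"
    }
    
    //Open an ALSA capture device ("default", "hw:0", ...) the same way the
    //dshow microphone is opened on Windows; only the input format changes.
    AVFormatContext *open_alsa_mic(const char *dev)
    {
    	avdevice_register_all();
    	AVInputFormat *alsa = av_find_input_format("alsa");
    	AVFormatContext *ctx = NULL;
    	if (avformat_open_input(&ctx, dev, alsa, NULL) != 0)
    		return NULL;                  //wrong device name, or device busy
    	if (avformat_find_stream_info(ctx, NULL) < 0)
    	{
    		avformat_close_input(&ctx);
    		return NULL;
    	}
    	return ctx;
    }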

     

    Next, pushing a PCM file. The PCM file came from my microphone, recorded locally with PyAudio. PCM is uncompressed audio, so the files are huge: converting a 4.3 MB MP3 to PCM produced a 51.4 MB file. I now want to push that PCM out, and the question was which encoder to use; after a lot of searching, it turns out the ffmpeg command should be written like this:

    ffmpeg -f s16le -ac 2 -ar 44100 -acodec pcm_s16le -i /home/dct/Videos/mp3_441.pcm -f flv rtmp://192.168.1.182/mylive/mp3

     

    The first -f is the format used to read the input, here s16le. -ac 2 means two audio channels. -ar 44100 sets the sample rate to 44100 Hz, i.e. 44100 samples per second. -acodec selects the codec, here pcm_s16le; the 16 is the sample size in bits, so each sample is a signed 16-bit value (-32768 to 32767). The second -f is the output format; again flv, because we are pushing over RTMP (other formats I tried all failed). Last is the RTMP URL, which points at the RTMP server.
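    As a sanity check on those sizes: uncompressed 16-bit stereo PCM at 44100 Hz has a fixed byte rate, which is exactly why the file balloons. A rough back-of-the-envelope sketch:

    #include <cstdio>
    
    int main()
    {
    	//s16le, 2 channels, 44100 Hz: bytes per second of raw PCM
    	const double bytes_per_sec = 44100.0 * 2 /*channels*/ * 2 /*bytes per sample*/;
    	//~176 kB/s, about 10.6 MB per minute, so a few minutes of audio
    	//easily turns a 4.3 MB MP3 into ~51 MB of PCM.
    	std::printf("%.0f bytes/s = %.1f MB/min\n",
    	            bytes_per_sec, bytes_per_sec * 60 / 1e6);
    	return 0;
    }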

     

  • Install virtual-audio-capturer (download link: ...). You can build it from source or install the prebuilt package directly. The command line for recording the screen and audio while streaming is: ffmpeg -f dshow -i audio="麦克风 (Realtek High Definition Au" -f dsh
  • ffmpeg push/pull: RTMP with NAT traversal
  •      FFmpeg: configuration on Linux      FFmpeg: introduction      FFmpeg: configuration on Linux      FFmpeg: command-line tools overview      FFmpeg: common tool commands      FFmpeg: ...
  • Use the FFmpeg API to capture camera images and microphone audio, with image preview; encode the video and audio, save them to a file, and keep them in sync. For a better understanding of the code, read the related article on my blog:...
  • Common FFmpeg streaming commands

    1. Preparing to push RTMP with FFmpeg: first make sure nginx with the rtmp module is installed. Open nginx.conf and complete the configuration as follows (nginx config, fig. 1) ...2. Pushing with FFmpeg: 1. push an MP4 file; video file path: /Users/xu/
  • The test system is Ubuntu. When using a camera inside a virtual machine, set USB compatibility to USB 3.0, otherwise the push hangs and debugging shows it blocked inside av_read_frame(). 1. Pushing the microphone over RTMP: ffmpeg -f alsa -thread_queue_size...
  • Sample ffmpeg streaming code

    Streaming test code for ffmpeg on a Mac: ffmpeg -f avfoundation -framerate 30 -video_size 640x480 -i "0" -vcodec libx264 -preset ultrafast -acodec libfaac -f flv rtmp://localhost:1935/sqflive/room ffmpeg -f ...
  • Capture a USB camera and microphone with the FFmpeg API, transcode, and push over RTMP
  • FFMPEG: reducing latency on the push side (part 1). This post covers some of the options for tuning down latency at the push end (additions welcome); other ends can be tuned along similar lines, e.g. lowering capture buffering and cutting encoding time. A Windows push client is used for the explanation; see the API sketch below. 1...
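    The post is cut off above, but the encoder-side latency knobs it alludes to look roughly like this through the FFmpeg API. A sketch only: the option names are standard libx264 private options, not taken from the post itself.

    extern "C"
    {
    #include "libavcodec/avcodec.h"
    #include "libavutil/opt.h"
    }
    
    //Typical low-latency settings for a libx264 push: no B-frame reordering,
    //zerolatency tuning, and a short GOP so viewers can join quickly.
    void setLowLatency(AVCodecContext *enc)
    {
    	av_opt_set(enc->priv_data, "preset", "ultrafast", 0);
    	av_opt_set(enc->priv_data, "tune", "zerolatency", 0);
    	enc->max_b_frames = 0;   //each B-frame adds a frame of reorder delay
    	enc->gop_size = 30;      //more frequent keyframes, faster stream join
    }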
  • A collection of FFmpeg streaming commands for Windows

    ffmpeg: use the latest ffmpeg.exe from the official site (see the API documentation). Stream the desktop: ffmpeg -f gdigrab -video_size 1920x1080 -i desktop -r 20 -vf scale=1920x1080 -vcodec libx264 -preset ultrafast -pix_fmt yuv420p -f flv rtmp:/...
  • RTMP streaming with ffmpeg

    https://www.jianshu.com/p/c141fc7881e7 Common ffmpeg commands: list this machine's devices: ffmpeg -list_devices true -f dshow -i dummy. Test whether the camera works: ffplay -f dshow -i video="USB2.0 PC CAMERA" USB2... (the same device listing driven through the API is sketched below)
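    Driven through the API instead of the shell, that device listing looks roughly like this. A sketch only: dshow prints the device names to the log, and the open call is expected to fail.

    extern "C"
    {
    #include "libavdevice/avdevice.h"
    #include "libavformat/avformat.h"
    #include "libavutil/dict.h"
    }
    
    //Equivalent of "ffmpeg -list_devices true -f dshow -i dummy": ask the
    //dshow input to enumerate devices; the names appear in the FFmpeg log.
    void listDshowDevices()
    {
    	avdevice_register_all();
    	AVInputFormat *dshow = av_find_input_format("dshow");
    	AVDictionary *opts = NULL;
    	av_dict_set(&opts, "list_devices", "true", 0);
    	AVFormatContext *ctx = NULL;
    	avformat_open_input(&ctx, "video=dummy", dshow, &opts); //fails by design
    	av_dict_free(&opts);
    	if (ctx)
    		avformat_close_input(&ctx);
    }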
  • C#: streaming with ffmpeg. Set up an RTMP server with Docker by pulling an nginx-rtmp image: docker pull alfg/nginx-rtmp, then docker run -it -p 1935:1935 -p 8080:80 --rm alfg/nginx-rtmp. FFmpeg.AutoGen ...
  • A GB28181 surveillance project written in C# WinForms. It decodes with FFMpeg and pushes RTMP (this needs Nginx-RTMP, which is in my other upload), receives and decodes stably, and the pushed stream can be tested by playing it directly with FFPlay. Being a test project, it only supports one video channel...
  • Capturing camera images with FFmpeg and streaming them (RTSP/RTMP): development notes

    My earlier post "How to capture camera video and microphone audio with the FFmpeg API..." already covered reading data from capture devices, encoding it, and... Many developers call the FFmpeg API to push streams too; building a streamer with FFmpeg is simple, and the call flow is the same as for an ou...
  • ffmpeg and nginx streaming; the problem right now is that the ffmpeg push lags by 6 seconds.
  • Screen recording and streaming with ffmpeg on Linux

    ffmpeg: -s 160x128 / 320x240 / 640x480 sets the frame size; -r 15 / 25 sets the frame rate; -f video4linux2 / gdigrab / x11grab / alsa selects the capture format; -f flv / mp3 / mp4 selects the output format; -i /dev/video0 / /dev/dsp / :0.0 / hw:0,0 / hw:0,1 (-i hw:0,0...
  • Notes on ffmpeg streaming commands

    Common commands for screen recording and streaming: //push //-re -i C:\output.mp4 -vcodec libx264 -acodec copy -f flv -y rtmp://***/live/chid //pull //-i rtmp://127.0.0.1/live/chid -acodec copy -vcodec copy -f flv -y chid.mp4 //-...
  • ffmpeg streaming commands

    1. The ffmpeg push command: ffmpeg -re -i /Users/WX/Desktop/login_video.mp4 -vcodec libx264 -acodec aac -strict -2 -f flv rtmp://localhost:1935/rtmplive/room (WX is the computer's user name; login_video is the video being pushed) ...
  • Streaming defaults to FFmpeg, which the official Raspberry Pi OS ships preinstalled; check the details with ffmpeg -version. Get the RTMP address and stream key from a Bilibili live room: after applying for a room, start the broadcast from a PC, since only the PC flow exposes the RTMP address and stream key; using...
  • Also, pushing requires a streaming media server. For personal testing there is the small test server rtmp://eguid.cc:1935/rtmp/test (limited bandwidth: please stagger your usage, and keep the pushed video bitrate low so it does not hog the pipe). 1. Camera capture and recorded streaming: the camera name must...
  • OBS kept hitting one audio problem after another when streaming, so for now I push the audio stream directly with FFmpeg; I ran into plenty of pitfalls along the way and am recording them here. Video + audio: first install FFmpeg, then test with the following command. Our relay server is janus, though others work too...
  • Building on the RTMP and RTSP servers I set up earlier (see "[A/V] Setting up an RTSP server and pushing with FFMPEG commands (3-1)" and "[A/V] Setting up an RTMP server on Windows (3-2)"), I wrote rtsp|rtmp|hls streaming code with ffmpeg. ...
  • Live streaming with ffmpeg commands. Workflow: 1. overall flow; 2. nginx configuration; 3. configuring FFmpeg; 4. pushing with FFmpeg; 5. pulling with VLC; 6. playing the pulled stream in a web page. 1. Overall flow: on Windows, build the live environment with FFmpeg + nginx + rtmp and implement push and pull. ...
