// FFmpegWriter.cpp

#ifdef _WIN32
#include "stdafx.h"
#endif
#include "FFmpegWriter.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>

#define STREAM_DURATION   10.0
#define STREAM_FRAME_RATE 25                  /* 25 images/s */
#define STREAM_PIX_FMT    AV_PIX_FMT_YUV420P  /* default pix_fmt */
#define SCALE_FLAGS       SWS_BICUBIC

// a wrapper around a single output AVStream
struct OutputStream_t {
    AVStream *st;
    AVCodecContext *enc;
    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;
    AVFrame *frame;
    AVFrame *tmp_frame;
    struct SwsContext *sws_ctx;
    struct SwrContext *swr_ctx;
};
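
/* C++-friendly stand-ins for FFmpeg's av_ts2str()/av_ts2timestr() helpers
 * (an assumption about intent: the original macros in libavutil/timestamp.h
 * use C compound literals, which do not compile as C++). Note that both
 * variants below write into shared static buffers, so they are not thread-safe. */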
char av_ts_string[AV_TS_MAX_STRING_SIZE] = { 0 };
#define av_ts2str(ts) av_ts_make_string(av_ts_string, ts)
char av_ts_buff[AV_TS_MAX_STRING_SIZE] = { 0 };
#define av_ts2timestr(ts, tb) av_ts_make_time_string(av_ts_buff, ts, tb)

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, LogApi* pLogApi)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
    //pLogApi->Debug("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d",
    //    av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
    //    av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
    //    av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
    //    pkt->stream_index);
}
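
/* Custom log callback that routes encoder messages to the LogApi stored in the
 * codec context's opaque pointer. Currently inactive: the av_log_set_callback()
 * call in InitWriter() is commented out. */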
static void log_callback(void* ptr, int level, const char* fmt, va_list vl)
{
    // va_list vl2;
    // char line[1024] = {0};
    // static int print_prefix = 1;
    // va_copy(vl2, vl);
    // av_log_default_callback(ptr, level, fmt, vl);
    // av_log_format_line(ptr, level, fmt, vl2, line, sizeof(line), &print_prefix);
    // va_end(vl2);
    AVClassCategory type = av_default_get_category(ptr);
    if (AV_CLASS_CATEGORY_ENCODER == type) {
        AVCodecContext* c = (AVCodecContext*)ptr;
        if (c != NULL) {
            LogApi* pLogApi = (LogApi*)c->opaque;
            if (pLogApi != NULL) {
                pLogApi->vDebug(fmt, vl);
            }
        }
    }
    //if (report_file_level >= level) {
    //    fputs(line, report_file);
    //    fflush(report_file);
    //}
}

static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt, LogApi* pLogApi)
{
    /* rescale output packet timestamp values from codec to stream timebase */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;
    /* Write the compressed frame to the media file. */
    log_packet(fmt_ctx, pkt, pLogApi);
    return av_interleaved_write_frame(fmt_ctx, pkt);
}

/* Add an output stream. */
static bool add_stream(LogApi* pLogApi, OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id,
                       int width, int height, int colorbit, int nfps,
                       int nSamplePsec, int nchannels)
{
    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        pLogApi->Debug("add_stream failed: could not find encoder for %s.", avcodec_get_name(codec_id));
        return false;
    }
    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        pLogApi->Debug("add_stream failed: could not allocate stream.");
        return false;
    }
    ost->st->id = oc->nb_streams - 1;
    AVCodecContext* enc_ctx = avcodec_alloc_context3(*codec);
    if (!enc_ctx) {
        pLogApi->Debug("add_stream failed: could not allocate an encoding context.");
        return false;
    }
    ost->enc = enc_ctx;
    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
    {
        enc_ctx->sample_fmt = (*codec)->sample_fmts ? (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        enc_ctx->sample_rate = nSamplePsec;
        if ((*codec)->supported_samplerates) {
            for (int i = 0; (*codec)->supported_samplerates[i]; i++) {
                //pLogApi->Debug("(*codec)->supported_samplerates[%d] = %d.", i, (*codec)->supported_samplerates[i]);
            }
        }
        if (nchannels == 2) {
            enc_ctx->channel_layout = AV_CH_LAYOUT_STEREO;
            enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
        }
        else if (nchannels == 1) {
            enc_ctx->channel_layout = AV_CH_LAYOUT_MONO;
            enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
        }
        /* if the encoder restricts channel layouts, prefer stereo when available */
        if ((*codec)->channel_layouts) {
            enc_ctx->channel_layout = (*codec)->channel_layouts[0];
            for (int i = 0; (*codec)->channel_layouts[i]; i++) {
                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO) {
                    enc_ctx->channel_layout = AV_CH_LAYOUT_STEREO;
                }
            }
        }
        enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
        AVRational tb = { 1, enc_ctx->sample_rate };
        ost->st->time_base = tb;
    }
    break;
    case AVMEDIA_TYPE_VIDEO:
    {
        enc_ctx->codec_id = codec_id;
        //c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        enc_ctx->width = width;
        enc_ctx->height = height;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        AVRational time_rb = { 1, nfps };
        //ost->st->time_base = time_rb;
        //AVRational nfps_rb = { nfps, 1 };
        //ost->st->avg_frame_rate = nfps_rb;
        //enc_ctx->framerate = nfps_rb;
        enc_ctx->time_base = time_rb;
        //c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        enc_ctx->pix_fmt = STREAM_PIX_FMT;
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B-frames */
            enc_ctx->max_b_frames = 2;
        }
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            enc_ctx->mb_decision = 2;
        }
    }
    /* libx264 private options; av_opt_set simply returns an error (ignored here)
     * for encoders that do not provide them */
    av_opt_set(enc_ctx->priv_data, "preset", "superfast", 0);
    av_opt_set(enc_ctx->priv_data, "tune", "zerolatency", 0);
    break;
    default:
        break;
    }
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
        enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    //pLogApi->Debug("add_stream success get encoder for '%s'", avcodec_get_name(codec_id));
    return true;
}

/**************************************************************/
/* audio output */
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
                                  uint64_t channel_layout,
                                  int sample_rate, int nb_samples)
{
    AVFrame *frame = av_frame_alloc();
    int ret = -1;
    if (!frame) {
        return NULL;
    }
    frame->format = sample_fmt;
    frame->channel_layout = channel_layout;
    frame->sample_rate = sample_rate;
    frame->nb_samples = nb_samples;
    if (nb_samples) {
        ret = av_frame_get_buffer(frame, 0);
        if (ret < 0) {
            av_frame_free(&frame);
            return NULL;
        }
    }
    return frame;
}

static bool open_audio(LogApi* pLogApi, AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg, enum AVSampleFormat in_sample_fmt)
{
    AVCodecContext *c = NULL;
    int nb_samples = 0;
    int ret = -1;
    AVDictionary *opt = NULL;
    c = ost->enc;
    c->opaque = pLogApi;
    /* open it */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        pLogApi->Debug("open_audio failed: could not open audio codec %s, ret: %d.", avcodec_get_name(codec->id), ret);
        return false;
    }
    if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE) {
        /* encoder accepts any frame size, pick an arbitrary buffer size */
        nb_samples = 10000;
    }
    else {
        nb_samples = c->frame_size;
    }
    ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout, c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(in_sample_fmt, c->channel_layout, c->sample_rate, nb_samples);
    if (!ost->frame || !ost->tmp_frame) {
        pLogApi->Debug("open_audio failed: could not allocate audio frames.");
        return false;
    }
    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        pLogApi->Debug("open_audio failed: could not copy the stream parameters.");
        return false;
    }
    /* create resampler context */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        pLogApi->Debug("open_audio failed: could not allocate resampler context.");
        return false;
    }
    /* set options */
    av_opt_set_int(ost->swr_ctx, "in_channel_count", c->channels, 0);
    av_opt_set_int(ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", in_sample_fmt, 0);
    av_opt_set_int(ost->swr_ctx, "out_channel_count", c->channels, 0);
    av_opt_set_int(ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
    /* initialize the resampling context */
    if ((ret = swr_init(ost->swr_ctx)) < 0) {
        pLogApi->Debug("open_audio failed: could not initialize the resampling context.");
        return false;
    }
    //pLogApi->Debug("open_audio success encoder for %s, nb_samples:%d.", avcodec_get_name(codec->id), nb_samples);
    return true;
}

/* Pull one frame's worth of input samples from the audio input buffer into the
 * temporary (input-format) frame. */
static AVFrame *get_audio_frame(OutputStream *ost, ByteBuffer* audio_input_buffer)
{
    AVFrame *frame = ost->tmp_frame;
    audio_input_buffer->getBytes(frame->data[0], av_samples_get_buffer_size(NULL, frame->channels,
                                                                            frame->nb_samples,
                                                                            (AVSampleFormat)frame->format, 0));
    frame->pts = ost->next_pts;
    ost->next_pts += frame->nb_samples;
    return frame;
}

/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_audio_frame(LogApi* pLogApi, AVFormatContext *oc, OutputStream *audio_ost, ByteBuffer* audio_input_buffer)
{
    AVCodecContext *enc_ctx = NULL;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame = NULL;
    int ret = -1;
    int got_packet = 0;
    int dst_nb_samples = 0;
    av_init_packet(&pkt);
    enc_ctx = audio_ost->enc;
    frame = get_audio_frame(audio_ost, audio_input_buffer);
    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples; since input and output sample
         * rates are identical, this should equal the input count */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(audio_ost->swr_ctx, enc_ctx->sample_rate) + frame->nb_samples,
                                        enc_ctx->sample_rate, enc_ctx->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);
        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(audio_ost->frame);
        if (ret < 0) {
            pLogApi->Debug("write_audio_frame failed: av_frame_make_writable returned %d.", ret);
            return -1;
        }
#ifdef RVC_OS_WIN
        //pLogApi->Debug("write_audio_frame nb_samples: %d, dst_nb_samples: %d, pts: %I64d", frame->nb_samples, dst_nb_samples, frame->pts);
#else
        //pLogApi->Debug("write_audio_frame nb_samples: %d, dst_nb_samples: %d, pts: %lld", frame->nb_samples, dst_nb_samples, frame->pts);
#endif
        /* convert to destination format */
        ret = swr_convert(audio_ost->swr_ctx,
                          audio_ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            pLogApi->Debug("write_audio_frame failed: error while converting.");
            return -1;
        }
        frame = audio_ost->frame;
        AVRational rb = { 1, enc_ctx->sample_rate };
        frame->pts = av_rescale_q(audio_ost->samples_count, rb, enc_ctx->time_base);
        audio_ost->samples_count += dst_nb_samples;
#ifdef RVC_OS_WIN
        //pLogApi->Debug("write_audio_frame new nb_samples: %d, pts: %I64d", frame->nb_samples, frame->pts);
#else
        //pLogApi->Debug("write_audio_frame new nb_samples: %d, pts: %lld", frame->nb_samples, frame->pts);
#endif
    }
    ret = avcodec_encode_audio2(enc_ctx, &pkt, frame, &got_packet);
    //ret = avcodec_send_frame(c, frame);
    //if (ret < 0) {
    //    pLogApi->Debug("ffmpeg_encoder_process Error sending the frame to the encoder");
    //}
    //else {
    //    got_packet = avcodec_receive_packet(c, &pkt);
    //    if (got_packet < 0) {
    //        pLogApi->Debug("avcodec_receive_packet failed.");
    //        return got_packet;
    //    }
    //}
    if (got_packet) {
        //pLogApi->Debug("write_audio_frame got_packet");
        ret = write_frame(oc, &enc_ctx->time_base, audio_ost->st, &pkt, pLogApi);
        if (ret < 0) {
            pLogApi->Debug("write_audio_frame failed: error while writing audio frame: %d.", ret);
            return -1;
        }
    }
    return (frame || got_packet) ? 0 : 1;
}
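
/* A minimal sketch (not used by this file) of the same encode step written
 * against the newer send/receive API that the commented-out lines above refer
 * to; the draining loop is the main difference from avcodec_encode_audio2():
 *
 *     ret = avcodec_send_frame(enc_ctx, frame);
 *     while (ret >= 0) {
 *         ret = avcodec_receive_packet(enc_ctx, &pkt);
 *         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 *             break;          // encoder needs more input / is fully drained
 *         if (ret < 0)
 *             return -1;      // genuine encoding error
 *         write_frame(oc, &enc_ctx->time_base, audio_ost->st, &pkt, pLogApi);
 *         av_packet_unref(&pkt);
 *     }
 */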

/**************************************************************/
/* video output */
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
    AVFrame *picture = NULL;
    int ret = -1;
    picture = av_frame_alloc();
    if (!picture) {
        return NULL;
    }
    picture->format = pix_fmt;
    picture->width = width;
    picture->height = height;
    /* allocate the buffers for the frame data */
    ret = av_frame_get_buffer(picture, 32);
    if (ret < 0) {
        av_frame_free(&picture);
        return NULL;
    }
    return picture;
}

static bool open_video(LogApi* pLogApi, AVFormatContext *oc, AVCodec *codec, OutputStream *video_ost, AVDictionary *opt_arg, enum AVPixelFormat input_pix_fmt)
{
    int ret = -1;
    AVCodecContext *enc_ctx = video_ost->enc;
    AVDictionary *opt = NULL;
    av_dict_copy(&opt, opt_arg, 0);
    //huchen add for msmpeg4v3
    //av_dict_set(&opt, "lmin", "1180", 0);
    //av_dict_set(&opt, "lmax", "2360", 0);
    enc_ctx->bit_rate = 128 * 1000;
    //c->qmin = 16;
    //c->qmax = 30;
    //c->max_b_frames = 0;
    enc_ctx->opaque = pLogApi;
    /* open the codec */
    ret = avcodec_open2(enc_ctx, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        pLogApi->Debug("open_video failed: could not open video codec %s, ret: %d.", avcodec_get_name(codec->id), ret);
        return false;
    }
    /* allocate and init a re-usable frame */
    video_ost->frame = alloc_picture(enc_ctx->pix_fmt, enc_ctx->width, enc_ctx->height);
    if (!video_ost->frame) {
        pLogApi->Debug("open_video failed: could not allocate video frame.");
        return false;
    }
    /* If the output format is not input_pix_fmt, then a temporary input_pix_fmt
     * picture is needed too. It is then converted to the required
     * output format. */
    video_ost->tmp_frame = NULL;
    if (enc_ctx->pix_fmt != input_pix_fmt) {
        video_ost->tmp_frame = alloc_picture(input_pix_fmt, enc_ctx->width, enc_ctx->height);
        if (!video_ost->tmp_frame) {
            pLogApi->Debug("open_video failed: could not allocate temporary picture.");
            return false;
        }
    }
    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(video_ost->st->codecpar, enc_ctx);
    if (ret < 0) {
        pLogApi->Debug("open_video failed: could not copy the stream parameters.");
        return false;
    }
    //pLogApi->Debug("open_video success encoder for %s.", avcodec_get_name(codec->id));
    //pLogApi->Debug("open_video success encoder output_fmt: %s.", av_get_pix_fmt_name(enc_ctx->pix_fmt));
    //pLogApi->Debug("open_video success encoder input_fmt: %s.", av_get_pix_fmt_name(input_pix_fmt));
    //pLogApi->Debug("open_video success encoder, video enc_ctx time_base num: %d.", enc_ctx->time_base.num);
    //pLogApi->Debug("open_video success encoder, video enc_ctx time_base den: %d.", enc_ctx->time_base.den);
    return true;
}

static AVFrame *get_video_frame(LogApi* pLogApi, OutputStream *video_ost, char *data, int len, AVPixelFormat input_pix_fmt)
{
    AVCodecContext *enc_ctx = video_ost->enc;
    /* when we pass a frame to the encoder, it may keep a reference to it
     * internally; make sure we do not overwrite it here */
    if (av_frame_make_writable(video_ost->frame) < 0) {
        return NULL;
    }
    if (enc_ctx->pix_fmt != input_pix_fmt) {
        /* we must convert it
         * to the codec pixel format if needed */
        if (!video_ost->sws_ctx) {
            video_ost->sws_ctx = sws_getContext(enc_ctx->width, enc_ctx->height,
                                                input_pix_fmt,
                                                enc_ctx->width,
                                                enc_ctx->height,
                                                enc_ctx->pix_fmt,
                                                SCALE_FLAGS, NULL, NULL, NULL);
            if (!video_ost->sws_ctx) {
                return NULL;
            }
        }
        /* copy: frames in the video queue are 24-bit RGB (24 bpp) */
#ifdef _WIN32
        /* copy rows in reverse order to flip vertically (Windows bitmaps are bottom-up) */
        for (int i = video_ost->tmp_frame->height - 1, j = 0; i >= 0 && j < video_ost->tmp_frame->height; i--, j++) {
            memcpy(video_ost->tmp_frame->data[0] + j * video_ost->tmp_frame->linesize[0],
                   data + i * video_ost->tmp_frame->width * 3, video_ost->tmp_frame->width * 3);
        }
#else
        if (video_ost->tmp_frame->linesize[0] > video_ost->tmp_frame->width * 3) {
            for (int i = 0; i < video_ost->tmp_frame->height; i++) {
                memcpy(video_ost->tmp_frame->data[0] + i * video_ost->tmp_frame->linesize[0],
                       data + i * video_ost->tmp_frame->width * 3, video_ost->tmp_frame->width * 3);
            }
        }
        else {
            memcpy(video_ost->tmp_frame->data[0], data, len);
        }
#endif
        //convert
        sws_scale(video_ost->sws_ctx, (const uint8_t* const*)video_ost->tmp_frame->data,
                  video_ost->tmp_frame->linesize, 0, enc_ctx->height, video_ost->frame->data,
                  video_ost->frame->linesize);
    }
    else {
        /* no conversion needed: copy the 24-bit RGB input straight into the output
         * frame (tmp_frame is not allocated in this case) */
#ifdef _WIN32
        /* copy rows in reverse order to flip vertically (Windows bitmaps are bottom-up) */
        for (int i = video_ost->frame->height - 1, j = 0; i >= 0 && j < video_ost->frame->height; i--, j++) {
            memcpy(video_ost->frame->data[0] + j * video_ost->frame->linesize[0],
                   data + i * video_ost->frame->width * 3, video_ost->frame->width * 3);
        }
#else
        if (video_ost->frame->linesize[0] > video_ost->frame->width * 3) {
            for (int i = 0; i < video_ost->frame->height; i++) {
                memcpy(video_ost->frame->data[0] + i * video_ost->frame->linesize[0],
                       data + i * video_ost->frame->width * 3, video_ost->frame->width * 3);
            }
        }
        else {
            memcpy(video_ost->frame->data[0], data, len);
        }
#endif
    }
    video_ost->frame->pts = video_ost->next_pts++;
    //video_ost->frame->key_frame = 1;
    return video_ost->frame;
}

/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(LogApi* pLogApi, AVFormatContext *oc, OutputStream *ost, char *data, int len, AVPixelFormat input_pix_fmt)
{
    int ret = -1;
    AVCodecContext *enc_ctx = NULL;
    AVFrame *vframe = NULL;
    int got_packet = 0;
    AVPacket pkt = { 0 };
    enc_ctx = ost->enc;
    vframe = get_video_frame(pLogApi, ost, data, len, input_pix_fmt);
    if (!vframe) {
        pLogApi->Debug("write_video_frame failed: could not get a writable video frame.");
        return -1;
    }
    //pLogApi->Debug("get_video_frame pts = %d", vframe->pts);
    av_init_packet(&pkt);
    /* encode the image */
    int iencoderet = avcodec_encode_video2(enc_ctx, &pkt, vframe, &got_packet);
    if (iencoderet < 0) {
        pLogApi->Debug("write_video_frame failed: error %d encoding video frame.", iencoderet);
        return -1;
    }
    else {
        //pLogApi->Debug("avcodec_encode_video2 success(%d) and got_packet ret = %d.", iencoderet, got_packet);
    }
    if (got_packet) {
        //pLogApi->Debug("write_video_frame got_packet");
        ret = write_frame(oc, &enc_ctx->time_base, ost->st, &pkt, pLogApi);
    }
    else {
        ret = 0;
    }
    if (ret < 0) {
        pLogApi->Debug("write_video_frame failed: error while writing video frame: %d.", ret);
        return -1;
    }
    return (vframe || got_packet) ? 0 : 1;
}

static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
    avcodec_free_context(&ost->enc);
    av_frame_free(&ost->frame);
    av_frame_free(&ost->tmp_frame);
    sws_freeContext(ost->sws_ctx);
    swr_free(&ost->swr_ctx);
}

/**************************************************************/
/* media file output */
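
/* Note: nmaxspacing, nquality, nOutBitRate and iAudioType are accepted but not
 * referenced anywhere in this implementation. */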
bool FFmpegWriter::InitWriter(char* filename, int width, int height, int colorbit, int nfps,
                              int nSamplePsec, int nchannels, int nBitPerSample, int nmaxspacing, int nquality, int nOutBitRate, int iAudioType)
{
    int iret = -1;
    AVDictionary *opt = NULL;
    bool result = true;
    if (nBitPerSample != 8 && nBitPerSample != 16 && nBitPerSample != 32) {
        m_pLogApi->Debug("Init FFmpegWriter failed for BitPerSample = %d.", nBitPerSample);
        return false;
    }
    if (nchannels != 1 && nchannels != 2) {
        m_pLogApi->Debug("Init FFmpegWriter failed for channels = %d.", nchannels);
        return false;
    }
    if (colorbit != 24) {
        m_pLogApi->Debug("Init FFmpegWriter failed for colorbit = %d.", colorbit);
        return false;
    }
    /* only 24-bit BGR input is supported */
    m_input_pix_fmt = AV_PIX_FMT_BGR24;
    /* allocate the output media context */
    avformat_alloc_output_context2(&m_formatctx, NULL, NULL, filename);
    if (!m_formatctx) {
        m_pLogApi->Debug("Init FFmpegWriter failed for avformat_alloc_output_context2, filename = %s.", filename);
        return false;
    }
    //av_log_set_level(AV_LOG_ERROR);
    //av_log_set_callback(log_callback);
    m_outfmt = m_formatctx->oformat;
    //m_pLogApi->Debug("real output format name of '%s' is %s, long_name is %s, mime_type is %s, extensions is %s.", filename, m_outfmt->name, m_outfmt->long_name, m_outfmt->mime_type, m_outfmt->extensions);
    m_video_st = new OutputStream();
    m_audio_st = new OutputStream();
    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (m_outfmt->video_codec != AV_CODEC_ID_NONE) {
        result = add_stream(m_pLogApi, m_video_st, m_formatctx, &m_video_codec, m_outfmt->video_codec, width, height, colorbit, nfps, nSamplePsec, nchannels);
        if (result == false) {
            Close();
            m_pLogApi->Debug("Init FFmpegWriter failed for add_stream, video_codec = %d.", m_outfmt->video_codec);
            return result;
        }
        m_bhave_video = true;
    }
    if (m_outfmt->audio_codec != AV_CODEC_ID_NONE) {
        result = add_stream(m_pLogApi, m_audio_st, m_formatctx, &m_audio_codec, m_outfmt->audio_codec, width, height, colorbit, nfps, nSamplePsec, nchannels);
        if (result == false) {
            Close();
            m_pLogApi->Debug("Init FFmpegWriter failed for add_stream, audio_codec = %d.", m_outfmt->audio_codec);
            return result;
        }
        m_bhave_audio = true;
    }
    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (m_bhave_video) {
        result = open_video(m_pLogApi, m_formatctx, m_video_codec, m_video_st, opt, m_input_pix_fmt);
        if (result == false) {
            Close();
            m_pLogApi->Debug("Init FFmpegWriter failed for open_video, video_codec = %d.", m_outfmt->video_codec);
            return result;
        }
    }
    if (m_bhave_audio) {
        if (nBitPerSample == 8) {
            result = open_audio(m_pLogApi, m_formatctx, m_audio_codec, m_audio_st, opt, AV_SAMPLE_FMT_U8);
            if (result == false) {
                Close();
                m_pLogApi->Debug("Init FFmpegWriter failed for open_audio, AV_SAMPLE_FMT_U8.");
                return result;
            }
        } else if (nBitPerSample == 16) {
            result = open_audio(m_pLogApi, m_formatctx, m_audio_codec, m_audio_st, opt, AV_SAMPLE_FMT_S16);
            if (result == false) {
                Close();
                m_pLogApi->Debug("Init FFmpegWriter failed for open_audio, AV_SAMPLE_FMT_S16.");
                return result;
            }
        } else if (nBitPerSample == 32) {
            result = open_audio(m_pLogApi, m_formatctx, m_audio_codec, m_audio_st, opt, AV_SAMPLE_FMT_FLT);
            if (result == false) {
                Close();
                m_pLogApi->Debug("Init FFmpegWriter failed for open_audio, AV_SAMPLE_FMT_FLT.");
                return result;
            }
        }
    }
    av_dump_format(m_formatctx, 0, filename, 1);
    /* open the output file, if needed */
    if (!(m_outfmt->flags & AVFMT_NOFILE)) {
        iret = avio_open(&m_formatctx->pb, filename, AVIO_FLAG_WRITE);
        if (iret < 0) {
            Close();
            m_pLogApi->Debug("Init FFmpegWriter failed for avio_open, %s.", filename);
            return false;
        }
    }
    /* the audio staging buffer is sized for roughly three seconds of input PCM */
    m_audio_input_buffer = new ByteBuffer(3 * nBitPerSample / 8 * nchannels * nSamplePsec);
    m_pLogApi->Debug("Init FFmpegWriter success, audio_input_buffer:%d.", 3 * nBitPerSample / 8 * nchannels * nSamplePsec);
    m_bstart = false;
    return result;
}

bool FFmpegWriter::StartWrite()
{
    AVDictionary *opt = NULL;
    /* Write the stream header, if any. */
    int ret = avformat_write_header(m_formatctx, &opt);
    if (ret < 0) {
        m_pLogApi->Debug("StartWrite failed when writing the header: %d.", ret);
        return false;
    }
    m_pLogApi->Debug("FFmpegWriter StartWrite success.");
    m_bstart = true;
    return true;
}

bool FFmpegWriter::StopWrite() {
    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    if (m_formatctx == NULL) {
        m_pLogApi->Debug("StopWrite failed: format context is null.");
        return false;
    }
    if (m_bstart) {
        av_write_trailer(m_formatctx);
    }
    Close();
    av_log_set_callback(NULL);
    m_bstart = false;
    m_pLogApi->Debug("FFmpegWriter StopWrite success.");
    return true;
}

void FFmpegWriter::Close()
{
    /* Close each codec. */
    if (m_bhave_video) {
        close_stream(m_formatctx, m_video_st);
    }
    if (m_bhave_audio) {
        close_stream(m_formatctx, m_audio_st);
    }
    if (m_formatctx) {
        if (!(m_outfmt->flags & AVFMT_NOFILE)) {
            /* Close the output file. */
            avio_closep(&m_formatctx->pb);
        }
        /* free the stream */
        avformat_free_context(m_formatctx);
        m_formatctx = NULL;
    }
    if (m_video_st != NULL) {
        delete m_video_st;
        m_video_st = NULL;
    }
    if (m_audio_st != NULL) {
        delete m_audio_st;
        m_audio_st = NULL;
    }
    if (m_audio_input_buffer != NULL) {
        delete m_audio_input_buffer;
        m_audio_input_buffer = NULL;
    }
    m_bhave_video = false;
    m_bhave_audio = false;
    m_pLogApi->Debug("FFmpegWriter Close success.");
}

bool FFmpegWriter::ReceiveAudioData(unsigned char* pData, unsigned long len)
{
    if (m_formatctx == NULL || m_audio_st == NULL || !m_bhave_audio) {
        m_pLogApi->Debug("ReceiveAudioData failed: writer is not initialized for audio.");
        return false;
    }
    // append the incoming PCM to audio_input_buffer
    m_audio_input_buffer->putBytes(pData, len);
    AVFrame* frame = m_audio_st->tmp_frame;
    int frame_size = av_samples_get_buffer_size(NULL, frame->channels,
                                                frame->nb_samples,
                                                (AVSampleFormat)frame->format, 0);
    //m_pLogApi->Debug("ReceiveAudioData len:%d, available_data_len:%d, frame_size:%d.", len, audio_input_buffer->bytesRemaining(), frame_size);
    // write full frames in a loop while enough data is buffered
    while (frame_size <= m_audio_input_buffer->bytesRemaining()) {
        //m_pLogApi->Debug("ReceiveAudioData available_data_len:%d.", audio_input_buffer->bytesRemaining());
        int result = write_audio_frame(m_pLogApi, m_formatctx, m_audio_st, m_audio_input_buffer);
        if (result < 0) {
            m_pLogApi->Debug("write_audio_frame failed, %d.", result);
            return false;
        }
    }
    // compact audio_input_buffer: keep only the unread remainder so the buffer does not keep growing
    uint8_t* tmp_buffer = new uint8_t[frame_size];
    int bytesRemaining = m_audio_input_buffer->bytesRemaining();
    m_audio_input_buffer->getBytes(tmp_buffer, bytesRemaining);
    m_audio_input_buffer->resize(bytesRemaining);
    m_audio_input_buffer->putBytes(tmp_buffer, bytesRemaining);
    delete[] tmp_buffer;
    return true;
}

bool FFmpegWriter::ReceiveVideoData(unsigned char* pData, unsigned long len)
{
    if (m_formatctx == NULL || m_video_st == NULL || !m_bhave_video) {
        m_pLogApi->Debug("ReceiveVideoData failed: writer is not initialized for video.");
        return false;
    }
    //m_pLogApi->Debug("ReceiveVideoData len:%d", len);
    int result = write_video_frame(m_pLogApi, m_formatctx, m_video_st, (char*)pData, len, m_input_pix_fmt);
    if (result < 0) {
        m_pLogApi->Debug("write_video_frame failed, %d.", result);
        return false;
    }
    return true;
}
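
/* Usage sketch (an assumption, not part of the original file): the writer is
 * driven as Init -> Start -> Receive* -> Stop. The buffer names below are
 * hypothetical; parameter order follows InitWriter()'s signature (filename,
 * width, height, colorbit, fps, sample rate, channels, bits per sample, then
 * the four unused tuning parameters noted above).
 *
 *     FFmpegWriter writer;                               // assumes a default-constructible writer
 *     if (writer.InitWriter((char*)"out.mp4", 640, 480, 24, 25,
 *                           44100, 2, 16, 0, 0, 0, 0) && writer.StartWrite()) {
 *         writer.ReceiveVideoData(bgrFrame, 640 * 480 * 3);   // one 24-bit BGR frame per call
 *         writer.ReceiveAudioData(pcmChunk, pcmChunkLen);     // interleaved PCM, any chunk size
 *         writer.StopWrite();                                 // writes the trailer and closes the file
 *     }
 */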