// FFmpegWriter.cpp
  1. #include "stdafx.h"
  2. #include "FFmpegWriter.h"
  3. #include <stdlib.h>
  4. #include <stdio.h>
  5. #include <string.h>
  6. #include <math.h>
  7. #define STREAM_DURATION 10.0
  8. #define STREAM_FRAME_RATE 25 /* 25 images/s */
  9. #define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
  10. #define SCALE_FLAGS SWS_BICUBIC
  11. // a wrapper around a single output AVStream
// a wrapper around a single output AVStream
// NOTE(review): the tag is OutputStream_t but the rest of the file uses the
// name OutputStream — presumably typedef'd in FFmpegWriter.h; confirm.
struct OutputStream_t {
    AVStream *st;                // muxer stream; owned by the AVFormatContext
    AVCodecContext *enc;         // encoder context; freed in close_stream()
    /* pts of the next frame that will be generated */
    int64_t next_pts;
    int samples_count;           // total audio samples resampled so far; used to derive audio pts
    AVFrame *frame;              // reusable frame in the encoder's native format
    AVFrame *tmp_frame;          // staging frame in the caller's input format (video: NULL when no conversion is needed)
    struct SwsContext *sws_ctx;  // pixel-format converter (video only; lazily created)
    struct SwrContext *swr_ctx;  // sample-format resampler (audio only)
};
// Scratch buffers backing the timestamp-formatting macros below.
// NOTE(review): a single shared mutable buffer per macro makes these
// non-reentrant and not thread-safe — acceptable only for single-threaded
// debug logging.
char av_ts_string[AV_TS_MAX_STRING_SIZE] = { 0 };
#define av_ts2str(ts) av_ts_make_string(av_ts_string, ts)
char av_ts_buff[AV_TS_MAX_STRING_SIZE] = { 0 };
#define av_ts2timestr(ts, tb) av_ts_make_time_string(av_ts_buff, ts, tb)
  27. static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, LogApi* pLogApi)
  28. {
  29. AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
  30. //pLogApi->Debug("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d",
  31. // av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
  32. // av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
  33. // av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
  34. // pkt->stream_index);
  35. }
/* Custom av_log callback: forwards FFmpeg log lines that originate from an
 * encoder context to the LogApi instance stashed in that context's opaque
 * field (set in open_audio()/open_video()). Messages from any other
 * category are silently dropped. Registered via av_log_set_callback()
 * (currently commented out in InitWriter()). */
static void log_callback(void* ptr, int level, const char* fmt, va_list vl)
{
    // va_list vl2;
    // char line[1024] = {0};
    // static int print_prefix = 1;
    // va_copy(vl2, vl);
    // av_log_default_callback(ptr, level, fmt, vl);
    // av_log_format_line(ptr, level, fmt, vl2, line, sizeof(line), &print_prefix);
    // va_end(vl2);
    AVClassCategory type = av_default_get_category(ptr);
    if (AV_CLASS_CATEGORY_ENCODER == type) {
        AVCodecContext* c = (AVCodecContext*)ptr;
        if (c != NULL) {
            // opaque was pointed at the writer's logger when the codec was opened
            LogApi* pLogApi = (LogApi*)c->opaque;
            if (pLogApi != NULL) {
                pLogApi->vDebug(fmt, vl);
            }
        }
    }
    //if (report_file_level >= level) {
    //    fputs(line, report_file);
    //    fflush(report_file);
    //}
}
  60. static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt, LogApi* pLogApi)
  61. {
  62. /* rescale output packet timestamp values from codec to stream timebase */
  63. av_packet_rescale_ts(pkt, *time_base, st->time_base);
  64. pkt->stream_index = st->index;
  65. /* Write the compressed frame to the media file. */
  66. log_packet(fmt_ctx, pkt, pLogApi);
  67. return av_interleaved_write_frame(fmt_ctx, pkt);
  68. }
  69. /* Add an output stream. */
/* Add an output stream: look up the encoder for codec_id, create the muxer
 * stream, allocate an encoder context and pre-configure it from the caller's
 * audio (sample rate, channels) or video (size, fps) parameters.
 * The codec context is stored in ost->enc and released by close_stream().
 * Returns false (after logging) on any failure; on the avformat_new_stream/
 * alloc failure paths previously allocated objects are reclaimed later by
 * avformat_free_context()/close_stream(). */
static bool add_stream(LogApi* pLogApi, OutputStream *ost, AVFormatContext *oc,
                       AVCodec **codec,
                       enum AVCodecID codec_id,
                       int width, int height, int colorbit, int nfps,
                       int nSamplePsec, int nchannels)
{
    /* find the encoder */
    *codec = avcodec_find_encoder(codec_id);
    if (!(*codec)) {
        pLogApi->Debug("add_stream failed for Could not find encoder for %s", avcodec_get_name(codec_id));
        return false;
    }
    ost->st = avformat_new_stream(oc, NULL);
    if (!ost->st) {
        return false;
    }
    ost->st->id = oc->nb_streams-1;
    AVCodecContext* enc_ctx = avcodec_alloc_context3(*codec);
    if (!enc_ctx) {
        pLogApi->Debug("add_stream failed for Could not alloc an encoding context!");
        return false ;
    }
    ost->enc = enc_ctx;
    switch ((*codec)->type) {
    case AVMEDIA_TYPE_AUDIO:
    {
        // prefer the codec's first native sample format; FLTP as a fallback
        enc_ctx->sample_fmt = (*codec)->sample_fmts ? (*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
        enc_ctx->sample_rate = nSamplePsec;
        // NOTE(review): the requested sample rate is used as-is; supported
        // rates are only logged, never enforced — confirm the codec accepts it.
        if ((*codec)->supported_samplerates) {
            for (int i = 0; (*codec)->supported_samplerates[i]; i++) {
                pLogApi->Debug("(*codec)->supported_samplerates[%d] = %d.", i, (*codec)->supported_samplerates[i]);
            }
        }
        if (nchannels == 2){
            enc_ctx->channel_layout = AV_CH_LAYOUT_STEREO;
            enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
        }
        else if (nchannels == 1){
            enc_ctx->channel_layout = AV_CH_LAYOUT_MONO;
            enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
        }
        // if the codec restricts layouts, pick its first one, but prefer
        // stereo whenever the codec supports it (may override mono request)
        if ((*codec)->channel_layouts) {
            enc_ctx->channel_layout = (*codec)->channel_layouts[0];
            for (int i = 0; (*codec)->channel_layouts[i]; i++) {
                if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO) {
                    enc_ctx->channel_layout = AV_CH_LAYOUT_STEREO;
                }
            }
        }
        enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
        // audio stream timebase = 1/sample_rate (one tick per sample)
        AVRational tb = { 1, enc_ctx->sample_rate };
        ost->st->time_base = tb;
    }
    break;
    case AVMEDIA_TYPE_VIDEO:
    {
        enc_ctx->codec_id = codec_id;
        //c->bit_rate = 400000;
        /* Resolution must be a multiple of two. */
        enc_ctx->width = width;
        enc_ctx->height = height;
        /* timebase: This is the fundamental unit of time (in seconds) in terms
         * of which frame timestamps are represented. For fixed-fps content,
         * timebase should be 1/framerate and timestamp increments should be
         * identical to 1. */
        AVRational time_rb = { 1, nfps };
        //ost->st->time_base = time_rb;
        //AVRational nfps_rb = { nfps, 1 };
        //ost->st->avg_frame_rate = nfps_rb;
        //enc_ctx->framerate = nfps_rb;
        enc_ctx->time_base = time_rb;
        //c->gop_size = 12; /* emit one intra frame every twelve frames at most */
        enc_ctx->pix_fmt = STREAM_PIX_FMT;
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
            /* just for testing, we also add B-frames */
            enc_ctx->max_b_frames = 2;
        }
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
            /* Needed to avoid using macroblocks in which some coeffs overflow.
             * This does not happen with normal video, it just happens here as
             * the motion of the chroma plane does not match the luma plane. */
            enc_ctx->mb_decision = 2;
        }
    }
    // NOTE(review): "preset"/"tune" are libx264 private options; for other
    // video codecs these calls are harmless no-ops (unknown option).
    av_opt_set(enc_ctx->priv_data, "preset", "superfast", 0);
    av_opt_set(enc_ctx->priv_data, "tune", "zerolatency", 0);
    break;
    default:
        break;
    }
    /* Some formats want stream headers to be separate. */
    if (oc->oformat->flags & AVFMT_GLOBALHEADER) {
        enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    pLogApi->Debug("add_stream success get encoder for '%s'", avcodec_get_name(codec_id));
    return true;
}
  167. /**************************************************************/
  168. /* audio output */
  169. static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
  170. uint64_t channel_layout,
  171. int sample_rate, int nb_samples)
  172. {
  173. AVFrame *frame = av_frame_alloc();
  174. int ret = -1;
  175. if (!frame) {
  176. return NULL;
  177. }
  178. frame->format = sample_fmt;
  179. frame->channel_layout = channel_layout;
  180. frame->sample_rate = sample_rate;
  181. frame->nb_samples = nb_samples;
  182. if (nb_samples) {
  183. ret = av_frame_get_buffer(frame, 0);
  184. if (ret < 0) {
  185. av_frame_free(&frame);
  186. return NULL;
  187. }
  188. }
  189. return frame;
  190. }
  191. static bool open_audio(LogApi* pLogApi, AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg, enum AVSampleFormat in_sample_fmt)
  192. {
  193. AVCodecContext *c = NULL;
  194. int nb_samples = 0;
  195. int ret = -1;
  196. AVDictionary *opt = NULL;
  197. c = ost->enc;
  198. c->opaque = pLogApi;
  199. /* open it */
  200. av_dict_copy(&opt, opt_arg, 0);
  201. ret = avcodec_open2(c, codec, &opt);
  202. av_dict_free(&opt);
  203. if (ret < 0) {
  204. pLogApi->Debug("open_audio failed for could not open audio codec: %s, ret: %d.", avcodec_get_name(codec->id), ret);
  205. return false;
  206. }
  207. if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE) {
  208. nb_samples = 10000;
  209. }
  210. else {
  211. nb_samples = c->frame_size;
  212. }
  213. ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout, c->sample_rate, nb_samples);
  214. ost->tmp_frame = alloc_audio_frame(in_sample_fmt, c->channel_layout, c->sample_rate, nb_samples);
  215. /* copy the stream parameters to the muxer */
  216. ret = avcodec_parameters_from_context(ost->st->codecpar, c);
  217. if (ret < 0) {
  218. pLogApi->Debug("open_audio failed for could not copy the stream parameters.");
  219. return false;
  220. }
  221. /* create resampler context */
  222. ost->swr_ctx = swr_alloc();
  223. if (!ost->swr_ctx) {
  224. pLogApi->Debug("open_audio failed for could not allocate resampler context.");
  225. return false;
  226. }
  227. /* set options */
  228. av_opt_set_int(ost->swr_ctx, "in_channel_count", c->channels, 0);
  229. av_opt_set_int(ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
  230. av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", in_sample_fmt, 0);
  231. av_opt_set_int(ost->swr_ctx, "out_channel_count", c->channels, 0);
  232. av_opt_set_int(ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
  233. av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
  234. /* initialize the resampling context */
  235. if ((ret = swr_init(ost->swr_ctx)) < 0) {
  236. pLogApi->Debug("open_audio failed for failed to initialize the resampling context.");
  237. return false;
  238. }
  239. pLogApi->Debug("open_audio success encoder for %s, nb_samples:%d.", avcodec_get_name(codec->id), nb_samples);
  240. return true;
  241. }
  242. /* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
  243. * 'nb_channels' channels. */
  244. static AVFrame *get_audio_frame(OutputStream *ost, ByteBuffer* audio_input_buffer)
  245. {
  246. AVFrame *frame = ost->tmp_frame;
  247. audio_input_buffer->getBytes(frame->data[0], av_samples_get_buffer_size(NULL, frame->channels,
  248. frame->nb_samples,
  249. (AVSampleFormat)frame->format, 0));
  250. frame->pts = ost->next_pts;
  251. ost->next_pts += frame->nb_samples;
  252. return frame;
  253. }
  254. /*
  255. * encode one audio frame and send it to the muxer
  256. * return 1 when encoding is finished, 0 otherwise
  257. */
/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 *
 * Pulls one frame of raw samples from audio_input_buffer (via
 * get_audio_frame), resamples it into the encoder's sample format,
 * re-stamps its pts from the running samples_count, and writes any packet
 * the (legacy) avcodec_encode_audio2 API produces.
 * NOTE(review): avcodec_encode_audio2 is deprecated in FFmpeg >= 3.1; the
 * send/receive replacement is sketched in the commented block below.
 */
static int write_audio_frame(LogApi* pLogApi, AVFormatContext *oc, OutputStream *audio_ost, ByteBuffer* audio_input_buffer)
{
    AVCodecContext *enc_ctx = NULL;
    AVPacket pkt = { 0 }; // data and size must be 0;
    AVFrame *frame = NULL;
    int ret = -1;
    int got_packet = 0;
    int dst_nb_samples = 0;
    av_init_packet(&pkt);
    enc_ctx = audio_ost->enc;
    frame = get_audio_frame(audio_ost, audio_input_buffer);
    if (frame) {
        /* convert samples from native format to destination codec format, using the resampler */
        /* compute destination number of samples */
        // in and out rates are identical here, so this only accounts for
        // resampler delay; the assert below enforces a 1:1 sample count
        dst_nb_samples = av_rescale_rnd(swr_get_delay(audio_ost->swr_ctx, enc_ctx->sample_rate) + frame->nb_samples,
                                        enc_ctx->sample_rate, enc_ctx->sample_rate, AV_ROUND_UP);
        av_assert0(dst_nb_samples == frame->nb_samples);
        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(audio_ost->frame);
        if (ret < 0) {
            pLogApi->Debug("write_audio_frame failed for av_frame_make_writable: %d", ret);
            return -1;
        }
#ifdef RVC_OS_WIN
        //pLogApi->Debug("write_audio_frame nb_samples: %d, dst_nb_samples: %d, pts: %I64d", frame->nb_samples, dst_nb_samples, frame->pts);
#else
        //pLogApi->Debug("write_audio_frame nb_samples: %d, dst_nb_samples: %d, pts: %lld", frame->nb_samples, dst_nb_samples, frame->pts);
#endif
        /* convert to destination format */
        ret = swr_convert(audio_ost->swr_ctx,
                          audio_ost->frame->data, dst_nb_samples,
                          (const uint8_t **)frame->data, frame->nb_samples);
        if (ret < 0) {
            pLogApi->Debug("write_audio_frame failed for Error while converting");
            return -1;
        }
        // switch to the resampled frame and derive its pts from the total
        // number of samples emitted so far (1/sample_rate -> codec timebase)
        frame = audio_ost->frame;
        AVRational rb = { 1, enc_ctx->sample_rate };
        frame->pts = av_rescale_q(audio_ost->samples_count, rb, enc_ctx->time_base);
        audio_ost->samples_count += dst_nb_samples;
#ifdef RVC_OS_WIN
        //pLogApi->Debug("write_audio_frame new nb_samples: %d, pts: %I64d", frame->nb_samples, frame->pts);
#else
        //pLogApi->Debug("write_audio_frame new nb_samples: %d, pts: %lld", frame->nb_samples, frame->pts);
#endif
    }
    ret = avcodec_encode_audio2(enc_ctx, &pkt, frame, &got_packet);
    //ret = avcodec_send_frame(c, frame);
    //if (ret < 0) {
    //    pLogApi->Debug("ffmpeg_encoder_process Error sending the frame to the encoder");
    //}
    //else {
    //    got_packet = avcodec_receive_packet(c, &pkt);
    //    if (got_packet < 0) {
    //        pLogApi->Debug("avcodec_receive_packet failed.");
    //        return got_packet;
    //    }
    //}
    if (got_packet) {
        //pLogApi->Debug("write_audio_frame got_packet");
        ret = write_frame(oc, &enc_ctx->time_base, audio_ost->st, &pkt, pLogApi);
        if (ret < 0) {
            pLogApi->Debug("write_audio_frame failed for Error while writing audio frame: %d", ret);
            return -1;
        }
    }
    return (frame || got_packet) ? 0 : 1;
}
  329. /**************************************************************/
  330. /* video output */
  331. static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
  332. {
  333. AVFrame *picture = NULL;
  334. int ret = -1;
  335. picture = av_frame_alloc();
  336. if (!picture) {
  337. return NULL;
  338. }
  339. picture->format = pix_fmt;
  340. picture->width = width;
  341. picture->height = height;
  342. /* allocate the buffers for the frame data */
  343. ret = av_frame_get_buffer(picture, 32);
  344. if (ret < 0) {
  345. av_frame_free(&picture);
  346. return NULL;
  347. }
  348. return picture;
  349. }
/* Open the video encoder, allocate the reusable encode frame (encoder's
 * pixel format) and — only when the input format differs — a staging frame
 * in input_pix_fmt, then copy the codec parameters to the muxer stream.
 * Returns true on success; on failure logs and returns false (allocated
 * members are released later by close_stream()/Close()). */
static bool open_video(LogApi* pLogApi, AVFormatContext *oc, AVCodec *codec, OutputStream *video_ost, AVDictionary *opt_arg, enum AVPixelFormat input_pix_fmt)
{
    int ret = -1;
    AVCodecContext *enc_ctx = video_ost->enc;
    AVDictionary *opt = NULL;
    av_dict_copy(&opt, opt_arg, 0);
    //huchen add for msmpeg4v3
    //av_dict_set(&opt, "lmin", "1180", 0);
    //av_dict_set(&opt, "lmax", "2360", 0);
    // NOTE(review): 128 kbps is very low for video; presumably intentional
    // for small recordings — confirm against output quality requirements.
    enc_ctx->bit_rate = 128*1000;
    //c->qmin = 16;
    //c->qmax = 30;
    //c->max_b_frames = 0;
    enc_ctx->opaque = pLogApi;  // lets log_callback() recover the logger
    /* open the codec */
    ret = avcodec_open2(enc_ctx, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        pLogApi->Debug("open_video failed for Could not open video codec: %s, ret :%d", avcodec_get_name(codec->id), ret);
        return false;
    }
    /* allocate and init a re-usable frame */
    video_ost->frame = alloc_picture(enc_ctx->pix_fmt, enc_ctx->width, enc_ctx->height);
    if (!video_ost->frame) {
        pLogApi->Debug("open_video failed for Could not allocate video frame");
        return false;
    }
    /* If the output format is not input_pix_fmt, then a temporary input_pix_fmt
     * picture is needed too. It is then converted to the required
     * output format. */
    video_ost->tmp_frame = NULL;
    if (enc_ctx->pix_fmt != input_pix_fmt) {
        video_ost->tmp_frame = alloc_picture(input_pix_fmt, enc_ctx->width, enc_ctx->height);
        if (!video_ost->tmp_frame) {
            pLogApi->Debug("Could not allocate temporary picture");
            return false;
        }
    }
    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(video_ost->st->codecpar, enc_ctx);
    if (ret < 0) {
        pLogApi->Debug("open_video failed for Could not copy the stream parameters");
        return false;
    }
    pLogApi->Debug("open_video success encoder for %s.", avcodec_get_name(codec->id));
    pLogApi->Debug("open_video success encoder output_fmt: %s.", av_get_pix_fmt_name(enc_ctx->pix_fmt));
    pLogApi->Debug("open_video success encoder input_fmt: %s.", av_get_pix_fmt_name(input_pix_fmt));
    pLogApi->Debug("open_video success encoder, video enc_ctx time_base num: %d.", enc_ctx->time_base.num);
    pLogApi->Debug("open_video success encoder, video enc_ctx time_base den: %d.", enc_ctx->time_base.den);
    return true;
}
  401. static AVFrame *get_video_frame(LogApi* pLogApi, OutputStream *video_ost, char *data, int len, AVPixelFormat input_pix_fmt)
  402. {
  403. AVCodecContext *enc_ctx = video_ost->enc;
  404. /* when we pass a frame to the encoder, it may keep a reference to it
  405. * internally; make sure we do not overwrite it here */
  406. if (av_frame_make_writable(video_ost->frame) < 0) {
  407. return NULL;
  408. }
  409. if (enc_ctx->pix_fmt != input_pix_fmt) {
  410. /* we must convert it
  411. * to the codec pixel format if needed */
  412. if (!video_ost->sws_ctx) {
  413. video_ost->sws_ctx = sws_getContext(enc_ctx->width, enc_ctx->height,
  414. input_pix_fmt,
  415. enc_ctx->width,
  416. enc_ctx->height,
  417. enc_ctx->pix_fmt,
  418. SCALE_FLAGS, NULL, NULL, NULL);
  419. if (!video_ost->sws_ctx) {
  420. return NULL;
  421. }
  422. }
  423. //copy
  424. //因videoqueue中视频均为rgb24位格式,默认为24bpp
  425. for (int i = video_ost->tmp_frame->height -1, j = 0; i >= 0 && j < video_ost->tmp_frame->height; i--, j++){
  426. memcpy(video_ost->tmp_frame->data[0] + j * video_ost->tmp_frame->linesize[0],
  427. data + i * video_ost->tmp_frame->width * 3, video_ost->tmp_frame->width * 3);
  428. }
  429. //convert
  430. sws_scale(video_ost->sws_ctx, (const uint8_t* const*)video_ost->tmp_frame->data,
  431. video_ost->tmp_frame->linesize, 0, enc_ctx->height, video_ost->frame->data,
  432. video_ost->frame->linesize);
  433. }
  434. else {
  435. //copy
  436. //因videoqueue中视频均为rgb24位格式,默认为24bpp
  437. for (int i = video_ost->tmp_frame->height - 1, j = 0; i >= 0 && j < video_ost->tmp_frame->height; i--, j++){
  438. memcpy(video_ost->tmp_frame->data[0] + j * video_ost->tmp_frame->linesize[0],
  439. data + i * video_ost->tmp_frame->width * 3, video_ost->tmp_frame->width * 3);
  440. }
  441. }
  442. video_ost->frame->pts = video_ost->next_pts++;
  443. //video_ost->frame->key_frame = 1;
  444. return video_ost->frame;
  445. }
  446. /*
  447. * encode one video frame and send it to the muxer
  448. * return 1 when encoding is finished, 0 otherwise
  449. */
  450. static int write_video_frame(LogApi* pLogApi, AVFormatContext *oc, OutputStream *ost, char *data, int len, AVPixelFormat input_pix_fmt)
  451. {
  452. int ret = -1;
  453. AVCodecContext *enc_ctx = NULL;
  454. AVFrame *vframe = NULL;
  455. int got_packet = 0;
  456. AVPacket pkt = { 0 };
  457. enc_ctx = ost->enc;
  458. vframe = get_video_frame(pLogApi, ost, data, len, input_pix_fmt);
  459. //pLogApi->Debug("get_video_frame pts = %d", vframe->pts);
  460. av_init_packet(&pkt);
  461. /* encode the image */
  462. int iencoderet = avcodec_encode_video2(enc_ctx, &pkt, vframe, &got_packet);
  463. if (iencoderet < 0) {
  464. pLogApi->Debug("open_video failed for Error(%d) encoding video frame.", iencoderet);
  465. return -1;
  466. }
  467. else {
  468. //pLogApi->Debug("avcodec_encode_video2 success(%d) and got_packet ret = %d.", iencoderet, got_packet);
  469. }
  470. if (got_packet) {
  471. //pLogApi->Debug("write_video_frame got_packet");
  472. ret = write_frame(oc, &enc_ctx->time_base, ost->st, &pkt, pLogApi);
  473. }
  474. else {
  475. ret = 0;
  476. }
  477. if (ret < 0) {
  478. pLogApi->Debug("open_video failed for Error while writing video frame: %d", ret);
  479. return -1;
  480. }
  481. return (vframe || got_packet) ? 0 : 1;
  482. }
  483. static void close_stream(AVFormatContext *oc, OutputStream *ost)
  484. {
  485. avcodec_free_context(&ost->enc);
  486. av_frame_free(&ost->frame);
  487. av_frame_free(&ost->tmp_frame);
  488. sws_freeContext(ost->sws_ctx);
  489. swr_free(&ost->swr_ctx);
  490. }
  491. /**************************************************************/
  492. /* media file output */
/**************************************************************/
/* media file output */
/* Initialize the writer for 'filename': validate the audio/video input
 * parameters, create the output format context, add + open the audio and
 * video streams the container calls for, open the output file, and allocate
 * the audio staging buffer. On any failure, Close() tears down whatever was
 * created and false is returned. StartWrite() must be called afterwards to
 * write the container header.
 * NOTE(review): several parameters (nmaxspacing, nquality, nOutBitRate,
 * iAudioType) are accepted but unused here — confirm whether they are
 * consumed elsewhere or vestigial. */
bool FFmpegWriter::InitWriter(char* filename, int width, int height, int colorbit, int nfps,
    int nSamplePsec, int nchannels, int nBitPerSample, int nmaxspacing, int nquality, int nOutBitRate, int iAudioType)
{
    int iret = -1;
    AVDictionary *opt = NULL;
    bool result = true;
    // only 8/16/32-bit samples, mono/stereo, 24-bit color are supported
    if (nBitPerSample != 8 && nBitPerSample != 16 && nBitPerSample != 32){
        m_pLogApi->Debug("Init FFmpegWriter Failed for BitPerSample = %d", nBitPerSample);
        return false;
    }
    if (nchannels != 1 && nchannels != 2){
        m_pLogApi->Debug("Init FFmpegWriter Failed for channels = %d", nchannels);
        return false;
    }
    if (colorbit != 24){
        m_pLogApi->Debug("Init FFmpegWriter Failed for colorbit = %d", colorbit);
        return false;
    }
    // NOTE(review): colorbit == 24 is guaranteed by the guard above, so the
    // else branch below is unreachable dead code.
    if (colorbit == 24){
        m_input_pix_fmt = AV_PIX_FMT_BGR24;
    } else {
        m_input_pix_fmt = STREAM_PIX_FMT;
    }
    /* allocate the output media context; format is guessed from filename */
    avformat_alloc_output_context2(&m_formatctx, NULL, NULL, filename);
    if (!m_formatctx) {
        m_pLogApi->Debug("Init FFmpegWriter Failed for avformat_alloc_output_context2, filename = %s", filename);
        return false;
    }
    //av_log_set_level(AV_LOG_ERROR);
    //av_log_set_callback(log_callback);
    m_outfmt = m_formatctx->oformat;
    m_pLogApi->Debug("real output format name of '%s' is %s, long_name is %s, mime_type is %s, extensions is %s.", filename, m_outfmt->name, m_outfmt->long_name, m_outfmt->mime_type, m_outfmt->extensions);
    m_video_st = new OutputStream();
    m_audio_st = new OutputStream();
    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (m_outfmt->video_codec != AV_CODEC_ID_NONE) {
        result = add_stream(m_pLogApi, m_video_st, m_formatctx, &m_video_codec, m_outfmt->video_codec, width, height, colorbit, nfps, nSamplePsec, nchannels);
        if (result == false){
            Close();
            m_pLogApi->Debug("Init FFmpegWriter Failed for add_stream, video_codec = %d", m_outfmt->video_codec);
            return result;
        }
        m_bhave_video = true;
    }
    if (m_outfmt->audio_codec != AV_CODEC_ID_NONE) {
        result = add_stream(m_pLogApi, m_audio_st, m_formatctx, &m_audio_codec, m_outfmt->audio_codec, width, height, colorbit, nfps, nSamplePsec, nchannels);
        if (result == false){
            Close();
            m_pLogApi->Debug("Init FFmpegWriter Failed for add_stream, audio_codec = %d", m_outfmt->audio_codec);
            return result;
        }
        m_bhave_audio = true;
    }
    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (m_bhave_video){
        result = open_video(m_pLogApi, m_formatctx, m_video_codec, m_video_st, opt, m_input_pix_fmt);
        if (result == false){
            Close();
            m_pLogApi->Debug("Init FFmpegWriter Failed for open_video, video_codec = %d", m_outfmt->video_codec);
            return result;
        }
    }
    if (m_bhave_audio){
        // map the PCM bit depth to the matching FFmpeg input sample format
        if (nBitPerSample == 8){
            result = open_audio(m_pLogApi, m_formatctx, m_audio_codec, m_audio_st, opt, AV_SAMPLE_FMT_U8);
            if (result == false){
                Close();
                m_pLogApi->Debug("Init FFmpegWriter Failed for open_audio, AV_SAMPLE_FMT_U8");
                return result;
            }
        }else if (nBitPerSample == 16){
            result = open_audio(m_pLogApi, m_formatctx, m_audio_codec, m_audio_st, opt, AV_SAMPLE_FMT_S16);
            if (result == false){
                Close();
                m_pLogApi->Debug("Init FFmpegWriter Failed for open_audio, AV_SAMPLE_FMT_S16");
                return result;
            }
        }else if (nBitPerSample == 32) {
            // 32-bit input is treated as float PCM, not 32-bit integer
            result = open_audio(m_pLogApi, m_formatctx, m_audio_codec, m_audio_st, opt, AV_SAMPLE_FMT_FLT);
            if (result == false) {
                Close();
                m_pLogApi->Debug("Init FFmpegWriter Failed for open_audio, AV_SAMPLE_FMT_FLT");
                return result;
            }
        }
    }
    av_dump_format(m_formatctx, 0, filename, 1);
    /* open the output file, if needed */
    if (!(m_outfmt->flags & AVFMT_NOFILE)) {
        iret = avio_open(&m_formatctx->pb, filename, AVIO_FLAG_WRITE);
        if (iret < 0) {
            Close();
            m_pLogApi->Debug("Init FFmpegWriter Failed for avio_open, %s", filename);
            return false;
        }
    }
    // staging buffer sized to hold three seconds of raw input audio
    m_audio_input_buffer = new ByteBuffer(3*nBitPerSample/8 * nchannels* nSamplePsec);
    m_pLogApi->Debug("Init FFmpegWriter success, audio_input_buffer:%d", 3 * nBitPerSample / 8 * nchannels * nSamplePsec);
    m_bstart = false;
    return result;
}
  597. bool FFmpegWriter::StartWrite()
  598. {
  599. AVDictionary *opt = NULL;
  600. /* Write the stream header, if any. */
  601. int ret = avformat_write_header(m_formatctx, &opt);
  602. if (ret < 0) {
  603. m_pLogApi->Debug("StartWrite Failed when opening output file: %d", ret);
  604. return false;
  605. }
  606. m_pLogApi->Debug("FFmpegWriter StartWrite success");
  607. m_bstart = true;
  608. return true;
  609. }
  610. bool FFmpegWriter::StopWrite() {
  611. /* Write the trailer, if any. The trailer must be written before you
  612. * close the CodecContexts open when you wrote the header; otherwise
  613. * av_write_trailer() may try to use memory that was freed on
  614. * av_codec_close(). */
  615. if (m_formatctx == NULL){
  616. m_pLogApi->Debug("End Failed when oc is null");
  617. return false;
  618. }
  619. if (m_bstart) {
  620. av_write_trailer(m_formatctx);
  621. }
  622. Close();
  623. av_log_set_callback(NULL);
  624. m_bstart = false;
  625. m_pLogApi->Debug("FFmpegWriter End success");
  626. return true;
  627. }
  628. void FFmpegWriter::Close()
  629. {
  630. /* Close each codec. */
  631. if (m_bhave_video) {
  632. close_stream(m_formatctx, m_video_st);
  633. }
  634. if (m_bhave_audio) {
  635. close_stream(m_formatctx, m_audio_st);
  636. }
  637. if (!(m_outfmt->flags & AVFMT_NOFILE)) {
  638. /* Close the output file. */
  639. avio_closep(&m_formatctx->pb);
  640. }
  641. /* free the stream */
  642. if (m_formatctx) {
  643. avformat_free_context(m_formatctx);
  644. }
  645. if (m_video_st != NULL) {
  646. delete m_video_st;
  647. m_video_st = NULL;
  648. }
  649. if (m_audio_st != NULL) {
  650. delete m_audio_st;
  651. m_audio_st = NULL;
  652. }
  653. if (m_audio_input_buffer != NULL) {
  654. delete m_audio_input_buffer;
  655. m_audio_input_buffer = NULL;
  656. }
  657. m_pLogApi->Debug("FFmpegWriter Close success");
  658. }
  659. bool FFmpegWriter::ReceiveAudioData(unsigned char* pData, unsigned long len)
  660. {
  661. if (m_formatctx == NULL || m_audio_st == NULL || !m_bhave_audio){
  662. m_pLogApi->Debug("ReceiveAudioData Failed when oc is null");
  663. return false;
  664. }
  665. //插入audio_input_buffer
  666. m_audio_input_buffer->putBytes(pData, len);
  667. AVFrame* frame = m_audio_st->tmp_frame;
  668. int frame_size = av_samples_get_buffer_size(NULL, frame->channels,
  669. frame->nb_samples,
  670. (AVSampleFormat)frame->format, 0);
  671. //m_pLogApi->Debug("ReceiveAudioData len:%d, available_data_len:%d, frame_size:%d", len, audio_input_buffer->bytesRemaining(), frame_size);
  672. //循环写帧
  673. while (frame_size <= m_audio_input_buffer->bytesRemaining()) {
  674. //m_pLogApi->Debug("ReceiveAudioData available_data_len:%d", audio_input_buffer->bytesRemaining());
  675. int result = write_audio_frame(m_pLogApi, m_formatctx, m_audio_st, m_audio_input_buffer);
  676. if (result < 0) {
  677. m_pLogApi->Debug("write_audio_frame Failed, %d", result);
  678. return false;
  679. }
  680. }
  681. //重设置audio_input_buffer,避免buffer过大
  682. uint8_t* tmp_buffer = new uint8_t[frame_size];
  683. int bytesRemaining = m_audio_input_buffer->bytesRemaining();
  684. m_audio_input_buffer->getBytes(tmp_buffer, bytesRemaining);
  685. m_audio_input_buffer->resize(bytesRemaining);
  686. m_audio_input_buffer->putBytes(tmp_buffer, bytesRemaining);
  687. delete tmp_buffer;
  688. return true;
  689. }
  690. bool FFmpegWriter::ReceiveVideoData(unsigned char* pData, unsigned long len)
  691. {
  692. if (m_formatctx == NULL || m_video_st == NULL || !m_bhave_video){
  693. m_pLogApi->Debug("ReceiveVideoData Failed when oc is null");
  694. return false;
  695. }
  696. //m_pLogApi->Debug("ReceiveVideoData len:%d", len);
  697. int result = write_video_frame(m_pLogApi, m_formatctx, m_video_st, (char*)pData, len, m_input_pix_fmt);
  698. if (result < 0){
  699. m_pLogApi->Debug("write_video_frame Failed, %d", result);
  700. return false;
  701. }
  702. return true;
  703. }