FFmpegKit Android API 4.4
fftools_ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
26 /*
27  * CHANGES 01.2021
28  * - NDK r22 incompatibility issues regarding INT64_MAX fixed
29  *
30  * CHANGES 06.2020
31  * - ignoring signals implemented
32  * - cancel_operation() method signature updated with id
33  * - cancel by execution id implemented
34  *
35  * CHANGES 01.2020
36  * - ffprobe support changes
37  *
38  * CHANGES 12.2019
39  * - concurrent execution support
40  *
41  * CHANGES 08.2018
42  * --------------------------------------------------------
43  * - fftools_ prefix added to file name and parent headers
44  * - forward_report() method, report_callback function pointer and set_report_callback() setter
45  * method added to forward stats.
46  * - forward_report() call added from print_report()
47  * - cancel_operation() method added to trigger sigterm_handler
48  * - (!received_sigterm) validation added inside ifilter_send_eof() to complete cancellation
49  *
50  * CHANGES 07.2018
51  * --------------------------------------------------------
52  * - main() function renamed as ffmpeg_execute()
53  * - exit_program() implemented with setjmp
54  * - extern longjmp_value added to access exit code stored in exit_program()
55  * - ffmpeg_var_cleanup() method added
56  */
57 
58 #include "config.h"
59 #include <ctype.h>
60 #include <string.h>
61 #include <math.h>
62 #include <stdlib.h>
63 #include <errno.h>
64 #include <limits.h>
65 #include <stdatomic.h>
66 #include <stdint.h>
67 
68 #include "ffmpegkit_exception.h"
69 
70 #if HAVE_IO_H
71 #include <io.h>
72 #endif
73 #if HAVE_UNISTD_H
74 #include <unistd.h>
75 #endif
76 
77 #include "libavformat/avformat.h"
78 #include "libavdevice/avdevice.h"
79 #include "libswresample/swresample.h"
80 #include "libavutil/opt.h"
81 #include "libavutil/channel_layout.h"
82 #include "libavutil/parseutils.h"
83 #include "libavutil/samplefmt.h"
84 #include "libavutil/fifo.h"
85 #include "libavutil/hwcontext.h"
86 #include "libavutil/internal.h"
87 #include "libavutil/intreadwrite.h"
88 #include "libavutil/dict.h"
89 #include "libavutil/display.h"
90 #include "libavutil/mathematics.h"
91 #include "libavutil/pixdesc.h"
92 #include "libavutil/avstring.h"
93 #include "libavutil/libm.h"
94 #include "libavutil/imgutils.h"
95 #include "libavutil/timestamp.h"
96 #include "libavutil/bprint.h"
97 #include "libavutil/time.h"
98 #include "libavutil/thread.h"
99 #include "libavutil/threadmessage.h"
100 #include "libavcodec/mathops.h"
101 #include "libavformat/os_support.h"
102 
103 # include "libavfilter/avfilter.h"
104 # include "libavfilter/buffersrc.h"
105 # include "libavfilter/buffersink.h"
106 
107 #if HAVE_SYS_RESOURCE_H
108 #include <sys/time.h>
109 #include <sys/types.h>
110 #include <sys/resource.h>
111 #elif HAVE_GETPROCESSTIMES
112 #include <windows.h>
113 #endif
114 #if HAVE_GETPROCESSMEMORYINFO
115 #include <windows.h>
116 #include <psapi.h>
117 #endif
118 #if HAVE_SETCONSOLECTRLHANDLER
119 #include <windows.h>
120 #endif
121 
122 
123 #if HAVE_SYS_SELECT_H
124 #include <sys/select.h>
125 #endif
126 
127 #if HAVE_TERMIOS_H
128 #include <fcntl.h>
129 #include <sys/ioctl.h>
130 #include <sys/time.h>
131 #include <termios.h>
132 #elif HAVE_KBHIT
133 #include <conio.h>
134 #endif
135 
136 #include <time.h>
137 
138 #include "fftools_ffmpeg.h"
139 #include "fftools_cmdutils.h"
140 
141 #include "libavutil/avassert.h"
142 
143 static FILE *vstats_file;
144 
/* Variable names usable in -force_key_frames expressions; NULL-terminated.
 * NOTE(review): order presumably matches the FKF_* indices declared in
 * fftools_ffmpeg.h — verify before reordering. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};
153 
/* Snapshot of process timing used by -benchmark / -benchmark_all reporting.
 * The closing "} BenchmarkTimeStamps;" was lost in this copy of the file and
 * is restored here; without it the typedef is unterminated. */
typedef struct BenchmarkTimeStamps {
    int64_t real_usec; /* wall-clock time, microseconds */
    int64_t user_usec; /* user-mode CPU time, microseconds */
    int64_t sys_usec;  /* kernel-mode CPU time, microseconds */
} BenchmarkTimeStamps;
160 static void do_video_stats(OutputStream *ost, int frame_size);
162 static int64_t getmaxrss(void);
164 
165 __thread int run_as_daemon = 0;
166 __thread int nb_frames_dup = 0;
167 __thread unsigned dup_warning = 1000;
168 __thread int nb_frames_drop = 0;
169 __thread int64_t decode_error_stat[2];
170 __thread unsigned nb_output_dumped = 0;
171 
172 __thread int want_sdp = 1;
173 
175 __thread AVIOContext *progress_avio = NULL;
176 
177 __thread uint8_t *subtitle_out;
178 
179 __thread InputStream **input_streams = NULL;
180 __thread int nb_input_streams = 0;
181 __thread InputFile **input_files = NULL;
182 __thread int nb_input_files = 0;
183 
184 __thread OutputStream **output_streams = NULL;
185 __thread int nb_output_streams = 0;
186 __thread OutputFile **output_files = NULL;
187 __thread int nb_output_files = 0;
188 
190 __thread int nb_filtergraphs;
191 
192 __thread int64_t last_time = -1;
193 __thread int64_t keyboard_last_time = 0;
194 __thread int first_report = 1;
195 __thread int qp_histogram[52];
196 
197 void (*report_callback)(int, float, float, int64_t, int, double, double) = NULL;
198 
199 extern __thread int file_overwrite;
200 extern __thread int no_file_overwrite;
201 extern __thread int ignore_unknown_streams;
202 extern __thread int copy_unknown_streams;
203 extern int opt_map(void *optctx, const char *opt, const char *arg);
204 extern int opt_map_channel(void *optctx, const char *opt, const char *arg);
205 extern int opt_recording_timestamp(void *optctx, const char *opt, const char *arg);
206 extern int opt_data_frames(void *optctx, const char *opt, const char *arg);
207 extern int opt_progress(void *optctx, const char *opt, const char *arg);
208 extern int opt_target(void *optctx, const char *opt, const char *arg);
209 extern int opt_vsync(void *optctx, const char *opt, const char *arg);
210 extern int opt_abort_on(void *optctx, const char *opt, const char *arg);
211 extern int opt_stats_period(void *optctx, const char *opt, const char *arg);
212 extern int opt_qscale(void *optctx, const char *opt, const char *arg);
213 extern int opt_profile(void *optctx, const char *opt, const char *arg);
214 extern int opt_filter_complex(void *optctx, const char *opt, const char *arg);
215 extern int opt_filter_complex_script(void *optctx, const char *opt, const char *arg);
216 extern int opt_attach(void *optctx, const char *opt, const char *arg);
217 extern int opt_video_frames(void *optctx, const char *opt, const char *arg);
218 extern __thread int intra_only;
219 extern int opt_video_codec(void *optctx, const char *opt, const char *arg);
220 extern int opt_sameq(void *optctx, const char *opt, const char *arg);
221 extern int opt_timecode(void *optctx, const char *opt, const char *arg);
222 extern __thread int do_psnr;
223 extern int opt_vstats_file(void *optctx, const char *opt, const char *arg);
224 extern int opt_vstats(void *optctx, const char *opt, const char *arg);
225 extern int opt_video_frames(void *optctx, const char *opt, const char *arg);
226 extern int opt_old2new(void *optctx, const char *opt, const char *arg);
227 extern int opt_streamid(void *optctx, const char *opt, const char *arg);
228 extern int opt_bitrate(void *optctx, const char *opt, const char *arg);
229 extern int show_hwaccels(void *optctx, const char *opt, const char *arg);
230 extern int opt_video_filters(void *optctx, const char *opt, const char *arg);
231 extern int opt_audio_frames(void *optctx, const char *opt, const char *arg);
232 extern int opt_audio_qscale(void *optctx, const char *opt, const char *arg);
233 extern int opt_audio_codec(void *optctx, const char *opt, const char *arg);
234 extern int opt_channel_layout(void *optctx, const char *opt, const char *arg);
235 extern int opt_preset(void *optctx, const char *opt, const char *arg);
236 extern int opt_audio_filters(void *optctx, const char *opt, const char *arg);
237 extern int opt_subtitle_codec(void *optctx, const char *opt, const char *arg);
238 extern int opt_video_channel(void *optctx, const char *opt, const char *arg);
239 extern int opt_video_standard(void *optctx, const char *opt, const char *arg);
240 extern int opt_sdp_file(void *optctx, const char *opt, const char *arg);
241 extern int opt_data_codec(void *optctx, const char *opt, const char *arg);
242 extern int opt_init_hw_device(void *optctx, const char *opt, const char *arg);
243 extern int opt_filter_hw_device(void *optctx, const char *opt, const char *arg);
244 extern __thread int input_sync;
245 
246 #if HAVE_TERMIOS_H
247 
248 /* init terminal so that we can grab keys */
249 __thread struct termios oldtty;
250 __thread int restore_tty;
251 #endif
252 
253 #if HAVE_THREADS
254 static void free_input_threads(void);
255 #endif
256 
257 extern volatile int handleSIGQUIT;
258 extern volatile int handleSIGINT;
259 extern volatile int handleSIGTERM;
260 extern volatile int handleSIGXCPU;
261 extern volatile int handleSIGPIPE;
262 
263 extern __thread volatile long sessionId;
264 extern void cancelSession(long id);
265 extern int cancelRequested(long id);
266 
267 /* sub2video hack:
268  Convert subtitles to video with alpha to insert them in filter graphs.
269  This is a temporary solution until libavfilter gets real subtitles support.
270  */
271 
273 {
274  int ret;
275  AVFrame *frame = ist->sub2video.frame;
276 
277  av_frame_unref(frame);
278  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
279  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
280  ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
281  if ((ret = av_frame_get_buffer(frame, 0)) < 0)
282  return ret;
283  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
284  return 0;
285 }
286 
287 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
288  AVSubtitleRect *r)
289 {
290  uint32_t *pal, *dst2;
291  uint8_t *src, *src2;
292  int x, y;
293 
294  if (r->type != SUBTITLE_BITMAP) {
295  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
296  return;
297  }
298  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
299  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
300  r->x, r->y, r->w, r->h, w, h
301  );
302  return;
303  }
304 
305  dst += r->y * dst_linesize + r->x * 4;
306  src = r->data[0];
307  pal = (uint32_t *)r->data[1];
308  for (y = 0; y < r->h; y++) {
309  dst2 = (uint32_t *)dst;
310  src2 = src;
311  for (x = 0; x < r->w; x++)
312  *(dst2++) = pal[*(src2++)];
313  dst += dst_linesize;
314  src += r->linesize[0];
315  }
316 }
317 
/* Push the current sub2video frame (stamped with `pts`) into every
 * filtergraph input fed by this stream, keeping a reference so the same
 * frame can be re-sent on later heartbeats. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;
    int ret;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++) {
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                           AV_BUFFERSRC_FLAG_KEEP_REF |
                                           AV_BUFFERSRC_FLAG_PUSH);
        /* EOF from a buffer source during teardown is expected; warn only
         * on genuine errors */
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
                   av_err2str(ret));
    }
}
335 
336 void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
337 {
338  AVFrame *frame = ist->sub2video.frame;
339  int8_t *dst;
340  int dst_linesize;
341  int num_rects, i;
342  int64_t pts, end_pts;
343 
344  if (!frame)
345  return;
346  if (sub) {
347  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
348  AV_TIME_BASE_Q, ist->st->time_base);
349  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
350  AV_TIME_BASE_Q, ist->st->time_base);
351  num_rects = sub->num_rects;
352  } else {
353  /* If we are initializing the system, utilize current heartbeat
354  PTS as the start time, and show until the following subpicture
355  is received. Otherwise, utilize the previous subpicture's end time
356  as the fall-back value. */
357  pts = ist->sub2video.initialize ?
358  heartbeat_pts : ist->sub2video.end_pts;
359  end_pts = INT64_MAX;
360  num_rects = 0;
361  }
362  if (sub2video_get_blank_frame(ist) < 0) {
363  av_log(ist->dec_ctx, AV_LOG_ERROR,
364  "Impossible to get a blank canvas.\n");
365  return;
366  }
367  dst = frame->data [0];
368  dst_linesize = frame->linesize[0];
369  for (i = 0; i < num_rects; i++)
370  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
371  sub2video_push_ref(ist, pts);
372  ist->sub2video.end_pts = end_pts;
373  ist->sub2video.initialize = 0;
374 }
375 
/* Called whenever a packet is read from the file containing `ist`: re-send
 * the current subpicture to every sub2video stream of the same file so
 * filters waiting on subtitle input do not stall the graph. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
            continue;
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
            /* if we have hit the end of the current displayed subpicture,
               or if we need to initialize the system, update the
               overlayed subpicture and its start/end times */
            sub2video_update(ist2, pts2 + 1, NULL);
        /* only push again if some filter actually failed a frame request */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}
407 
408 static void sub2video_flush(InputStream *ist)
409 {
410  int i;
411  int ret;
412 
413  if (ist->sub2video.end_pts < INT64_MAX)
414  sub2video_update(ist, INT64_MAX, NULL);
415  for (i = 0; i < ist->nb_filters; i++) {
416  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
417  if (ret != AVERROR_EOF && ret < 0)
418  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
419  }
420 }
421 
422 /* end of sub2video hack */
423 
/* Async-signal-safe part of terminal teardown: restore the tty attributes
 * saved by term_init(), if any were saved. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
431 
432 void term_exit(void)
433 {
434  av_log(NULL, AV_LOG_QUIET, "%s", "");
436 }
437 
438 static volatile int received_sigterm = 0;
439 static volatile int received_nb_signals = 0;
440 __thread atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
441 __thread volatile int ffmpeg_exited = 0;
442 __thread volatile int main_ffmpeg_return_code = 0;
443 __thread int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
444 extern __thread volatile int longjmp_value;
445 
446 static void
448 {
449  int ret;
450  received_sigterm = sig;
453  if(received_nb_signals > 3) {
454  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
455  strlen("Received > 3 system signals, hard exiting\n"));
456  if (ret < 0) { /* Do nothing */ };
457  exit(123);
458  }
459 }
460 
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: map console events onto the POSIX-style
 * sigterm_handler so cancellation behaves uniformly across platforms. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(0);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
492 
#ifdef __linux__
/* On Linux install handlers via sigaction(), reusing a `struct sigaction
 * action` the caller must have prepared in scope; elsewhere fall back to
 * plain signal(). */
#define SIGNAL(sig, func) \
    do { \
        action.sa_handler = func; \
        sigaction(sig, &action, NULL); \
    } while (0)
#else
#define SIGNAL(sig, func) \
    signal(sig, func)
#endif
503 
504 void term_init(void)
505 {
506 #if defined __linux__
507  struct sigaction action = {0};
508  action.sa_handler = sigterm_handler;
509 
510  /* block other interrupts while processing this one */
511  sigfillset(&action.sa_mask);
512 
513  /* restart interruptible functions (i.e. don't fail with EINTR) */
514  action.sa_flags = SA_RESTART;
515 #endif
516 
517 #if HAVE_TERMIOS_H
519  struct termios tty;
520  if (tcgetattr (0, &tty) == 0) {
521  oldtty = tty;
522  restore_tty = 1;
523 
524  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
525  |INLCR|IGNCR|ICRNL|IXON);
526  tty.c_oflag |= OPOST;
527  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
528  tty.c_cflag &= ~(CSIZE|PARENB);
529  tty.c_cflag |= CS8;
530  tty.c_cc[VMIN] = 1;
531  tty.c_cc[VTIME] = 0;
532 
533  tcsetattr (0, TCSANOW, &tty);
534  }
535  if (handleSIGQUIT == 1) {
536  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
537  }
538  }
539 #endif
540 
541  if (handleSIGINT == 1) {
542  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
543  }
544  if (handleSIGTERM == 1) {
545  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
546  }
547 #ifdef SIGXCPU
548  if (handleSIGXCPU == 1) {
549  signal(SIGXCPU, sigterm_handler);
550  }
551 #endif
552 #ifdef SIGPIPE
553  if (handleSIGPIPE == 1) {
554  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
555  }
556 #endif
557 #if HAVE_SETCONSOLECTRLHANDLER
558  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
559 #endif
560 }
561 
/* read a key without blocking */
/* Returns the key code (>= 0) if a key is pending on stdin, otherwise -1
 * (or a read() error code on termios platforms). Uses select() with a zero
 * timeout on POSIX, PeekNamedPipe/kbhit on Windows. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    /* zero timeout: poll stdin without blocking */
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
# endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
613 
int decode_interrupt_cb(void *ctx);

/* AVIOInterruptCB callback: abort blocking libav I/O once more signals have
 * arrived than the transcode-init counter accounts for. */
int decode_interrupt_cb(void *ctx)
{
    return received_nb_signals > atomic_load(&transcode_init_done);
}
620 
621 __thread const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
622 
/* Global cleanup handler (registered via register_exit): frees all
 * filtergraphs, output files/streams and input files/streams, closes the
 * vstats file and deinitializes networking. `ret` is the exit code being
 * propagated; it only influences the final log message. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            /* drain any frames still queued for this filter input */
            while (av_fifo_size(ifilter->frame_queue)) {
                AVFrame *frame;
                av_fifo_generic_read(ifilter->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_freep(&ifilter->frame_queue);
            if (ist->sub2video.sub_queue) {
                /* drain queued subtitles for sub2video streams */
                while (av_fifo_size(ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    av_fifo_generic_read(ist->sub2video.sub_queue,
                                         &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
                av_fifo_freep(&ist->sub2video.sub_queue);
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            avfilter_inout_free(&ofilter->out_tmp);
            av_freep(&ofilter->name);
            av_freep(&ofilter->formats);
            av_freep(&ofilter->channel_layouts);
            av_freep(&ofilter->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        AVFormatContext *s;
        if (!of)
            continue;
        s = of->ctx;
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        avformat_free_context(s);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost)
            continue;

        av_bsf_free(&ost->bsf_ctx);

        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);
        av_dict_free(&ost->swr_opts);

        avcodec_free_context(&ost->enc_ctx);
        avcodec_parameters_free(&ost->ref_par);

        if (ost->muxing_queue) {
            /* unref any packets still waiting to be muxed */
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_unref(&pkt);
            }
            av_fifo_freep(&ost->muxing_queue);
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        /* NOTE(review): upstream ffmpeg calls
         * avformat_close_input(&input_files[i]->ctx) here before freeing; that
         * call appears to have been lost in this copy — verify against the
         * original, otherwise demuxer contexts leak. */
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        if (fclose(vstats_file))
            av_log(NULL, AV_LOG_ERROR,
                   "Error closing vstats file, loss of information possible: %s\n",
                   av_err2str(AVERROR(errno)));
    }
    av_freep(&vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (cancelRequested(sessionId)) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received cancel request.\n");
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;
}
782 
783 void remove_avoptions(AVDictionary **a, AVDictionary *b)
784 {
785  AVDictionaryEntry *t = NULL;
786 
787  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
788  av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
789  }
790 }
791 
/* Abort via exit_program() if any user-supplied option in `m` was not
 * consumed by the component it was given to (i.e. the dictionary is
 * non-empty after processing). */
void assert_avoptions(AVDictionary *m)
{
    AVDictionaryEntry *t;
    if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
        exit_program(1);
    }
}
800 
/* Terminate the run when an experimental codec is selected without the
 * required strictness level; `c` and `encoder` identify the codec but are
 * unused in this build. */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
805 
806 static void update_benchmark(const char *fmt, ...)
807 {
808  if (do_benchmark_all) {
810  va_list va;
811  char buf[1024];
812 
813  if (fmt) {
814  va_start(va, fmt);
815  vsnprintf(buf, sizeof(buf), fmt, va);
816  va_end(va);
817  av_log(NULL, AV_LOG_INFO,
818  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
821  t.real_usec - current_time.real_usec, buf);
822  }
823  current_time = t;
824  }
825 }
826 
827 static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
828 {
829  int i;
830  for (i = 0; i < nb_output_streams; i++) {
831  OutputStream *ost2 = output_streams[i];
832  ost2->finished |= ost == ost2 ? this_stream : others;
833  }
834 }
835 
/* Hand one packet to the muxer for `ost`, enforcing -frames limits,
 * buffering packets until the muxer header is written, fixing up CFR
 * durations and non-monotonous/invalid timestamps along the way.
 * `unqueue` is nonzero when the packet was previously buffered (already
 * counted against the frame limit). */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* grow the queue, but stop doubling once past the data threshold */
            unsigned int are_we_over_size =
                (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
            int new_size = are_we_over_size ?
                           FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size) :
                           2 * av_fifo_size(ost->muxing_queue);

            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                av_log(NULL, AV_LOG_ERROR,
                       "Too many packets buffered for output stream %d:%d.\n",
                       ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        ret = av_packet_make_refcounted(pkt);
        if (ret < 0)
            exit_program(1);
        av_packet_move_ref(&tmp_pkt, pkt);
        ost->muxing_queue_data_size += tmp_pkt.size;
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        return;
    }

    /* under -vsync drop / async resampling, let the muxer regenerate timestamps */
    if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* pull encoder quality/error stats from packet side data for -stats */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
                                              NULL);
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                         ost->mux_timebase);
        }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            /* replace both with the median of {pts, dts, last_mux_dts + 1} */
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
            pkt->dts != AV_NOPTS_VALUE &&
            !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                if (exit_on_error)
                    loglevel = AV_LOG_ERROR;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               av_get_media_type_string(ost->enc_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
               pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        /* NOTE(review): upstream additionally sets main_ffmpeg_return_code and
         * calls close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED,
         * ENCODER_FINISHED) here; those lines appear to be missing in this
         * copy — verify against the original. */
    }
    av_packet_unref(pkt);
}
978 
980 {
981  OutputFile *of = output_files[ost->file_index];
982 
983  ost->finished |= ENCODER_FINISHED;
984  if (of->shortest) {
985  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
986  of->recording_time = FFMIN(of->recording_time, end);
987  }
988 }
989 
/*
 * Send a single packet to the output, applying any bitstream filters
 * associated with the output stream. This may result in any number
 * of packets actually being written, depending on what bitstream
 * filters are applied. The supplied packet is consumed and will be
 * blank (as if newly-allocated) when this function returns.
 *
 * If eof is set, instead indicate EOF to all bitstream filters and
 * therefore flush any delayed packets to the output. A blank packet
 * must be supplied in this case.
 */
static void output_packet(OutputFile *of, AVPacket *pkt,
                          OutputStream *ost, int eof)
{
    int ret = 0;

    /* apply the output bitstream filters */
    if (ost->bsf_ctx) {
        ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
        if (ret < 0)
            goto finish;
        /* drain every packet the filter produces for this input */
        while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
            write_packet(of, pkt, ost, 0);
        /* EAGAIN just means the filter needs more input — not an error */
        if (ret == AVERROR(EAGAIN))
            ret = 0;
    } else if (!eof)
        write_packet(of, pkt, ost, 0);

finish:
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
        if(exit_on_error)
            exit_program(1);
    }
}
1026 
/* NOTE(review): the signature line was lost in extraction; in upstream
 * fftools/ffmpeg.c this is `static int check_recording_time(OutputStream *ost)`
 * -- confirm against the full file. Returns 1 while the stream is within the
 * output file's -t/-to window, 0 (after closing the stream) once past it. */
{
    OutputFile *of = output_files[ost->file_index];

    /* Compare the stream's current position (sync_opts relative to its first
     * pts, in the encoder time base) against the recording-time limit. */
    if (of->recording_time != INT64_MAX &&
        av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
                      AV_TIME_BASE_Q) >= 0) {
        close_output_stream(ost);
        return 0;   /* limit reached: stop encoding this stream */
    }
    return 1;       /* still inside the recording window */
}
1039 
/* Rescale frame->pts from the filter-sink time base to the encoder time base,
 * honoring the output file's -ss start offset, and return the same timestamp
 * as a double that preserves sub-tick precision (consumed by the fps/vsync
 * code). Returns AV_NOPTS_VALUE (as a double) and leaves the frame untouched
 * when there is no frame, no valid pts, or no configured filter graph. */
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
{
    double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
    AVCodecContext *enc = ost->enc_ctx;
    if (!frame || frame->pts == AV_NOPTS_VALUE ||
        !enc || !ost->filter || !ost->filter->graph->graph)
        goto early_exit;

    {
        AVFilterContext *filter = ost->filter->filter;

        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
        AVRational filter_tb = av_buffersink_get_time_base(filter);
        AVRational tb = enc->time_base;
        /* Widen the denominator by up to 16 extra bits so the integer rescale
         * below carries fractional precision into float_pts. */
        int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

        tb.den <<= extra_bits;
        float_pts =
            av_rescale_q(frame->pts, filter_tb, tb) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
        float_pts /= 1 << extra_bits;
        // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
        float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

        /* the real integer pts, rescaled without the extra precision bits */
        frame->pts =
            av_rescale_q(frame->pts, filter_tb, enc->time_base) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
    }

early_exit:

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
               frame ? av_ts2str(frame->pts) : "NULL",
               frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
               float_pts,
               enc ? enc->time_base.num : -1,
               enc ? enc->time_base.den : -1);
    }

    return float_pts;
}
1082 
1083 static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len);
1084 
1085 static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
1086 {
1087  int ret = AVERROR_BUG;
1088  char error[1024] = {0};
1089 
1090  if (ost->initialized)
1091  return 0;
1092 
1093  ret = init_output_stream(ost, frame, error, sizeof(error));
1094  if (ret < 0) {
1095  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1096  ost->file_index, ost->index, error);
1097 
1098  if (fatal)
1099  exit_program(1);
1100  }
1101 
1102  return ret;
1103 }
1104 
1105 static void do_audio_out(OutputFile *of, OutputStream *ost,
1106  AVFrame *frame)
1107 {
1108  AVCodecContext *enc = ost->enc_ctx;
1109  AVPacket pkt;
1110  int ret;
1111 
1112  av_init_packet(&pkt);
1113  pkt.data = NULL;
1114  pkt.size = 0;
1115 
1116  adjust_frame_pts_to_encoder_tb(of, ost, frame);
1117 
1118  if (!check_recording_time(ost))
1119  return;
1120 
1121  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1122  frame->pts = ost->sync_opts;
1123  ost->sync_opts = frame->pts + frame->nb_samples;
1124  ost->samples_encoded += frame->nb_samples;
1125  ost->frames_encoded++;
1126 
1127  av_assert0(pkt.size || !pkt.data);
1128  update_benchmark(NULL);
1129  if (debug_ts) {
1130  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1131  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1132  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1133  enc->time_base.num, enc->time_base.den);
1134  }
1135 
1136  ret = avcodec_send_frame(enc, frame);
1137  if (ret < 0)
1138  goto error;
1139 
1140  while (1) {
1141  ret = avcodec_receive_packet(enc, &pkt);
1142  if (ret == AVERROR(EAGAIN))
1143  break;
1144  if (ret < 0)
1145  goto error;
1146 
1147  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1148 
1149  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1150 
1151  if (debug_ts) {
1152  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1153  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1154  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1155  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1156  }
1157 
1158  output_packet(of, &pkt, ost, 0);
1159  }
1160 
1161  return;
1162 error:
1163  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1164  exit_program(1);
1165 }
1166 
/* Encode one AVSubtitle and mux the resulting packet(s).
 * DVB subtitles need two packets per subtitle: one that draws the rects and
 * a second, rect-less one that clears them; every other codec needs one. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* lazily allocate the shared (file-scope) encode buffer on first use */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    /* NOTE(review): upstream fftools/ffmpeg.c subtracts the output file's
     * start_time from pts as the body of this if; that statement appears to
     * have been lost in extraction, leaving the if() to guard the for-loop
     * below instead -- restore `pts -= ...start_time...` from the original. */
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        /* the second DVB packet clears the screen, so it carries no rects */
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt.dts = pkt.pts;
        output_packet(of, &pkt, ost, 0);
    }
}
1249 
/* Encode one filtered video frame (or flush the encoder when next_picture is
 * NULL), applying the configured vsync policy: frames may be duplicated or
 * dropped to hit the output frame rate. Resulting packets are muxed through
 * output_packet(). Encoding errors abort the whole program.
 *
 * NOTE(review): several hyperlinked lines of this function were lost in
 * extraction (see the forced-keyframe expression block below); restore them
 * from the original fftools/ffmpeg.c before building. */
static void do_video_out(OutputFile *of,
                         OutputStream *ost,
                         AVFrame *next_picture)
{
    int ret, format_video_sync;
    AVPacket pkt;
    AVCodecContext *enc = ost->enc_ctx;
    AVRational frame_rate;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    double duration = 0;
    double sync_ipts = AV_NOPTS_VALUE;
    int frame_size = 0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    init_output_stream_wrapper(ost, next_picture, 1);
    sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* expected duration of one output frame, in encoder time-base units */
    frame_rate = av_buffersink_get_frame_rate(filter);
    if (frame_rate.num > 0 && frame_rate.den > 0)
        duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* with no filters in the way, prefer the input packet duration */
    if (!ost->filters_script &&
        !ost->filters &&
        (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
        next_picture &&
        ist &&
        lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

    if (!next_picture) {
        //end, flushing: estimate how many trailing duplicates are pending
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);
    } else {
        delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
        delta = delta0 + duration;

        /* by default, we output a single frame */
        nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
        nb_frames = 1;

        /* resolve VSYNC_AUTO to a concrete policy based on the muxer */
        format_video_sync = video_sync_method;
        if (format_video_sync == VSYNC_AUTO) {
            if(!strcmp(of->ctx->oformat->name, "avi")) {
                format_video_sync = VSYNC_VFR;
            } else
                format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
            if ( ist
                && format_video_sync == VSYNC_CFR
                && input_files[ist->file_index]->ctx->nb_streams == 1
                && input_files[ist->file_index]->input_ts_offset == 0) {
                format_video_sync = VSYNC_VSCFR;
            }
            if (format_video_sync == VSYNC_CFR && copy_ts) {
                format_video_sync = VSYNC_VSCFR;
            }
        }
        ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);

        /* small negative drift: absorb it into this frame's duration
         * instead of dropping/duplicating */
        if (delta0 < 0 &&
            delta > 0 &&
            format_video_sync != VSYNC_PASSTHROUGH &&
            format_video_sync != VSYNC_DROP) {
            if (delta0 < -0.6) {
                av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
            } else
                av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
            sync_ipts = ost->sync_opts;
            duration += delta0;
            delta0 = 0;
        }

        switch (format_video_sync) {
        case VSYNC_VSCFR:
            if (ost->frame_number == 0 && delta0 >= 0.5) {
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
                delta = duration;
                delta0 = 0;
                ost->sync_opts = llrint(sync_ipts);
            }
            /* fallthrough */
        case VSYNC_CFR:
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
                nb_frames = 0;
            } else if (delta < -1.1)
                nb_frames = 0;
            else if (delta > 1.1) {
                nb_frames = lrintf(delta);
                if (delta0 > 1.1)
                    nb0_frames = llrintf(delta0 - 0.6);
            }
            break;
        case VSYNC_VFR:
            if (delta <= -0.6)
                nb_frames = 0;
            else if (delta > 0.6)
                ost->sync_opts = llrint(sync_ipts);
            break;
        case VSYNC_DROP:
        case VSYNC_PASSTHROUGH:
            ost->sync_opts = llrint(sync_ipts);
            break;
        default:
            av_assert0(0);
        }
    }

    /* never exceed the -frames limit of the stream */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    /* keep a 3-entry history of previous-frame duplication counts */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_dropped) {
        nb_frames_drop++;
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    }
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
            nb_frames_drop++;
            return;
        }
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
        if (nb_frames_dup > dup_warning) {
            av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
            dup_warning *= 10;
        }
    }
    ost->last_dropped = nb_frames == nb0_frames && next_picture;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        int forced_keyframe = 0;
        double pts_time;
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;

        /* the first nb0_frames iterations re-encode the previous frame */
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
        } else
            in_picture = next_picture;

        if (!in_picture)
            return;

        in_picture->pts = ost->sync_opts;

        if (!check_recording_time(ost))
            return;

        in_picture->quality = enc->global_quality;
        in_picture->pict_type = 0;

        if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
            in_picture->pts != AV_NOPTS_VALUE)
            ost->forced_kf_ref_pts = in_picture->pts;

        /* decide whether this frame must be forced to a keyframe */
        pts_time = in_picture->pts != AV_NOPTS_VALUE ?
            (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
        if (ost->forced_kf_index < ost->forced_kf_count &&
            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
            ost->forced_kf_index++;
            forced_keyframe = 1;
        } else if (ost->forced_keyframes_pexpr) {
            double res;
            ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
            res = av_expr_eval(ost->forced_keyframes_pexpr,
            /* NOTE(review): the remaining av_expr_eval() arguments, the
             * ff_dlog argument list and the post-evaluation bookkeeping of
             * the FKF_* expression constants were lost in extraction here --
             * restore this block from the original fftools/ffmpeg.c. */
            ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                    res);
            if (res) {
                forced_keyframe = 1;
            }

        } else if ( ost->forced_keyframes
                   && !strncmp(ost->forced_keyframes, "source", 6)
                   && in_picture->key_frame==1
                   && !i) {
            forced_keyframe = 1;
        }

        if (forced_keyframe) {
            in_picture->pict_type = AV_PICTURE_TYPE_I;
            av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
        }

        update_benchmark(NULL);
        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                   av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);
        }

        ost->frames_encoded++;

        ret = avcodec_send_frame(enc, in_picture);
        if (ret < 0)
            goto error;
        // Make sure Closed Captions will not be duplicated
        av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);

        /* drain every packet the encoder has ready */
        while (1) {
            ret = avcodec_receive_packet(enc, &pkt);
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
            if (ret == AVERROR(EAGAIN))
                break;
            if (ret < 0)
                goto error;

            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
            }

            if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                pkt.pts = ost->sync_opts;

            /* encoder time base -> muxer time base */
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);

            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
                       av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
            }

            frame_size = pkt.size;
            output_packet(of, &pkt, ost, 0);

            /* if two pass, output log */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
        }
        ost->sync_opts++;
        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;

        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);
    }

    /* remember this frame so a later call can duplicate it if needed */
    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
    else
        av_frame_free(&ost->last_frame);

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
    exit_program(1);
}
1541 
1542 static double psnr(double d)
1543 {
1544  return -10.0 * log10(d);
1545 }
1546 
1547 static void do_video_stats(OutputStream *ost, int frame_size)
1548 {
1549  AVCodecContext *enc;
1550  int frame_number;
1551  double ti1, bitrate, avg_bitrate;
1552 
1553  /* this is executed just the first time do_video_stats is called */
1554  if (!vstats_file) {
1555  vstats_file = fopen(vstats_filename, "w");
1556  if (!vstats_file) {
1557  perror("fopen");
1558  exit_program(1);
1559  }
1560  }
1561 
1562  enc = ost->enc_ctx;
1563  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1564  frame_number = ost->st->nb_frames;
1565  if (vstats_version <= 1) {
1566  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1567  ost->quality / (float)FF_QP2LAMBDA);
1568  } else {
1569  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1570  ost->quality / (float)FF_QP2LAMBDA);
1571  }
1572 
1573  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1574  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1575 
1576  fprintf(vstats_file,"f_size= %6d ", frame_size);
1577  /* compute pts value */
1578  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1579  if (ti1 < 0.01)
1580  ti1 = 0.01;
1581 
1582  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1583  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1584  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1585  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1586  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1587  }
1588 }
1589 
/* NOTE(review): the signature line was lost in extraction; in upstream
 * fftools/ffmpeg.c this is `static void finish_output_stream(OutputStream *ost)`
 * -- confirm against the full file. */
{
    OutputFile *of = output_files[ost->file_index];
    int i;

    /* NOTE(review): a statement was elided here in extraction (upstream marks
     * this stream ENCODER_FINISHED | MUXER_FINISHED) -- restore from the
     * original source. */

    if (of->shortest) {
        for (i = 0; i < of->ctx->nb_streams; i++)
            /* NOTE(review): the loop body was elided here in extraction
             * (upstream marks the other streams of this file finished as
             * well) -- restore from the original source. */
    }
}
1602 
/*
 * Pull every frame currently available from all output buffer sinks and feed
 * each one to the matching audio/video encoder. With flush set, an EOF from
 * a video sink drains the encoder via do_video_out(of, ost, NULL).
 * Returns 0 on success, or AVERROR(ENOMEM) if the reusable frame cannot be
 * allocated.
 */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* skip streams whose filter graph is not configured yet */
        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        /*
         * Unlike video, with audio the audio frame size matters.
         * Currently we are fully reliant on the lavfi filter chain to
         * do the buffering deed for us, and thus the frame size parameter
         * needs to be set accordingly. Where does one get the required
         * frame size? From the initialized AVCodecContext of an audio
         * encoder. Thus, if we have gotten to an audio stream, initialize
         * the encoder earlier than receiving the first AVFrame.
         */
        if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
            init_output_stream_wrapper(ost, NULL, 1);

        /* lazily allocate the frame reused for every pull from this sink */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        while (1) {
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
            if (ret < 0) {
                /* EAGAIN (no frame ready) and EOF are expected; anything
                 * else is only warned about */
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_WARNING,
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
                        do_video_out(of, ost, NULL);
                }
                break;
            }
            if (ost->finished) {
                /* stream already closed: discard the frame, keep draining */
                av_frame_unref(filtered_frame);
                continue;
            }

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                do_video_out(of, ost, filtered_frame);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != filtered_frame->channels) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1688 
/* Print the end-of-run summary: per-media-type payload sizes and muxing
 * overhead at AV_LOG_INFO, plus detailed per-stream packet/frame counters at
 * AV_LOG_VERBOSE. total_size is the final output file size in bytes. */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;   /* muxing overhead in %; -1 means "unknown" */
    int i, j;
    int pass1_used = 1;

    /* accumulate payload bytes written, bucketed by media type */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
        case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
        case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
        default: other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size += ost->data_size;
        /* pass1_used stays set only if every stream is a pure first pass */
        if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
             != AV_CODEC_FLAG_PASS1)
            pass1_used = 0;
    }

    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        /* NB: these locals shadow the total_size parameter on purpose */
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        /* NB: these locals shadow the total_size parameter on purpose */
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->url);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1805 
1806 static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1807 {
1808  AVFormatContext *oc = NULL;
1809  AVCodecContext *enc = NULL;
1810  OutputStream *ost = NULL;
1811  int64_t pts = INT64_MIN + 1;
1812  int vid, i;
1813 
1814  int frame_number = 0;
1815  float fps = 0;
1816  float quality = 0;
1817  int64_t total_size = 0;
1818  int seconds = 0;
1819  double bitrate = 0.0;
1820  double speed = 0.0;
1821 
1822  float t = (cur_time-timer_start) / 1000000.0;
1823 
1824  oc = output_files[0]->ctx;
1825 
1826  // 1. calculate size
1827  total_size = avio_size(oc->pb);
1828  if (total_size <= 0) {
1829  total_size = avio_tell(oc->pb);
1830  }
1831 
1832  vid = 0;
1833  for (i = 0; i < nb_output_streams; i++) {
1834  ost = output_streams[i];
1835  enc = ost->enc_ctx;
1836 
1837  if (!ost->stream_copy) {
1838 
1839  // 2. extract quality
1840  quality = ost->quality / (float) FF_QP2LAMBDA;
1841  }
1842 
1843  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1844 
1845  // 3. extract frame number
1846  frame_number = ost->frame_number;
1847 
1848  // 4. calculate fps
1849  fps = t > 1 ? frame_number / t : 0;
1850  }
1851 
1852  // 5. calculate time
1853  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1854  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1855  ost->st->time_base, AV_TIME_BASE_Q));
1856 
1857  vid = 1;
1858  }
1859 
1860  // 6. calculate time, with microseconds to milliseconds conversion
1861  seconds = FFABS(pts) / 1000;
1862 
1863  // 7. calculating kbit/s value
1864  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1865 
1866  // 9. calculate processing speed = processed stream duration/operation duration
1867  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1868 
1869  // FORWARD DATA
1870  if (report_callback != NULL) {
1871  report_callback(frame_number, fps, quality, total_size, seconds, bitrate, speed);
1872  }
1873 }
1874 
1875 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1876 {
1877  AVBPrint buf, buf_script;
1878  OutputStream *ost;
1879  AVFormatContext *oc;
1880  int64_t total_size;
1881  AVCodecContext *enc;
1882  int frame_number, vid, i;
1883  double bitrate;
1884  double speed;
1885  int64_t pts = INT64_MIN + 1;
1886  int hours, mins, secs, us;
1887  const char *hours_sign;
1888  int ret;
1889  float t;
1890 
1891  if (!is_last_report) {
1892  if (last_time == -1) {
1893  last_time = cur_time;
1894  }
1895  if (((cur_time - last_time) < stats_period && !first_report) ||
1897  return;
1898  last_time = cur_time;
1899  }
1900 
1901  forward_report(is_last_report, timer_start, cur_time);
1902 
1903  if (!print_stats && !is_last_report && !progress_avio)
1904  return;
1905 
1906  t = (cur_time-timer_start) / 1000000.0;
1907 
1908 
1909  oc = output_files[0]->ctx;
1910 
1911  total_size = avio_size(oc->pb);
1912  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1913  total_size = avio_tell(oc->pb);
1914 
1915  vid = 0;
1916  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1917  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1918  for (i = 0; i < nb_output_streams; i++) {
1919  float q = -1;
1920  ost = output_streams[i];
1921  enc = ost->enc_ctx;
1922  if (!ost->stream_copy)
1923  q = ost->quality / (float) FF_QP2LAMBDA;
1924 
1925  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1926  av_bprintf(&buf, "q=%2.1f ", q);
1927  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1928  ost->file_index, ost->index, q);
1929  }
1930  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1931  float fps;
1932 
1933  frame_number = ost->frame_number;
1934  fps = t > 1 ? frame_number / t : 0;
1935  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1936  frame_number, fps < 9.95, fps, q);
1937  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1938  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1939  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1940  ost->file_index, ost->index, q);
1941  if (is_last_report)
1942  av_bprintf(&buf, "L");
1943  if (qp_hist) {
1944  int j;
1945  int qp = lrintf(q);
1946  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1947  qp_histogram[qp]++;
1948  for (j = 0; j < 32; j++)
1949  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1950  }
1951 
1952  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1953  int j;
1954  double error, error_sum = 0;
1955  double scale, scale_sum = 0;
1956  double p;
1957  char type[3] = { 'Y','U','V' };
1958  av_bprintf(&buf, "PSNR=");
1959  for (j = 0; j < 3; j++) {
1960  if (is_last_report) {
1961  error = enc->error[j];
1962  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1963  } else {
1964  error = ost->error[j];
1965  scale = enc->width * enc->height * 255.0 * 255.0;
1966  }
1967  if (j)
1968  scale /= 4;
1969  error_sum += error;
1970  scale_sum += scale;
1971  p = psnr(error / scale);
1972  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1973  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1974  ost->file_index, ost->index, type[j] | 32, p);
1975  }
1976  p = psnr(error_sum / scale_sum);
1977  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1978  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1979  ost->file_index, ost->index, p);
1980  }
1981  vid = 1;
1982  }
1983  /* compute min output value */
1984  if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
1985  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1986  ost->st->time_base, AV_TIME_BASE_Q));
1987  if (copy_ts) {
1988  if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1990  if (copy_ts_first_pts != AV_NOPTS_VALUE)
1992  }
1993  }
1994 
1995  if (is_last_report)
1996  nb_frames_drop += ost->last_dropped;
1997  }
1998 
1999  secs = FFABS(pts) / AV_TIME_BASE;
2000  us = FFABS(pts) % AV_TIME_BASE;
2001  mins = secs / 60;
2002  secs %= 60;
2003  hours = mins / 60;
2004  mins %= 60;
2005  hours_sign = (pts < 0) ? "-" : "";
2006 
2007  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
2008  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
2009 
2010  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
2011  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
2012  if (pts == AV_NOPTS_VALUE) {
2013  av_bprintf(&buf, "N/A ");
2014  } else {
2015  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
2016  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
2017  }
2018 
2019  if (bitrate < 0) {
2020  av_bprintf(&buf, "bitrate=N/A");
2021  av_bprintf(&buf_script, "bitrate=N/A\n");
2022  }else{
2023  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
2024  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
2025  }
2026 
2027  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
2028  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
2029  if (pts == AV_NOPTS_VALUE) {
2030  av_bprintf(&buf_script, "out_time_us=N/A\n");
2031  av_bprintf(&buf_script, "out_time_ms=N/A\n");
2032  av_bprintf(&buf_script, "out_time=N/A\n");
2033  } else {
2034  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
2035  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
2036  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
2037  hours_sign, hours, mins, secs, us);
2038  }
2039 
2041  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
2042  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
2043  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
2044 
2045  if (speed < 0) {
2046  av_bprintf(&buf, " speed=N/A");
2047  av_bprintf(&buf_script, "speed=N/A\n");
2048  } else {
2049  av_bprintf(&buf, " speed=%4.3gx", speed);
2050  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
2051  }
2052 
2053  if (print_stats || is_last_report) {
2054  const char end = is_last_report ? '\n' : '\r';
2055  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
2056  fprintf(stderr, "%s %c", buf.str, end);
2057  } else
2058  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
2059 
2060  fflush(stderr);
2061  }
2062  av_bprint_finalize(&buf, NULL);
2063 
2064  if (progress_avio) {
2065  av_bprintf(&buf_script, "progress=%s\n",
2066  is_last_report ? "end" : "continue");
2067  avio_write(progress_avio, buf_script.str,
2068  FFMIN(buf_script.len, buf_script.size - 1));
2069  avio_flush(progress_avio);
2070  av_bprint_finalize(&buf_script, NULL);
2071  if (is_last_report) {
2072  if ((ret = avio_closep(&progress_avio)) < 0)
2073  av_log(NULL, AV_LOG_ERROR,
2074  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
2075  }
2076  }
2077 
2078  first_report = 0;
2079 
2080  if (is_last_report)
2081  print_final_stats(total_size);
2082 }
2083 
2084 static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
2085 {
2086  // We never got any input. Set a fake format, which will
2087  // come from libavformat.
2088  ifilter->format = par->format;
2089  ifilter->sample_rate = par->sample_rate;
2090  ifilter->channels = par->channels;
2091  ifilter->channel_layout = par->channel_layout;
2092  ifilter->width = par->width;
2093  ifilter->height = par->height;
2094  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
2095 }
2096 
2097 static void flush_encoders(void)
2098 {
2099  int i, ret;
2100 
2101  for (i = 0; i < nb_output_streams; i++) {
2102  OutputStream *ost = output_streams[i];
2103  AVCodecContext *enc = ost->enc_ctx;
2104  OutputFile *of = output_files[ost->file_index];
2105 
2106  if (!ost->encoding_needed)
2107  continue;
2108 
2109  // Try to enable encoding with no input frames.
2110  // Maybe we should just let encoding fail instead.
2111  if (!ost->initialized) {
2112  FilterGraph *fg = ost->filter->graph;
2113 
2114  av_log(NULL, AV_LOG_WARNING,
2115  "Finishing stream %d:%d without any data written to it.\n",
2116  ost->file_index, ost->st->index);
2117 
2118  if (ost->filter && !fg->graph) {
2119  int x;
2120  for (x = 0; x < fg->nb_inputs; x++) {
2121  InputFilter *ifilter = fg->inputs[x];
2122  if (ifilter->format < 0)
2123  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2124  }
2125 
2127  continue;
2128 
2129  ret = configure_filtergraph(fg);
2130  if (ret < 0) {
2131  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
2132  exit_program(1);
2133  }
2134 
2135  finish_output_stream(ost);
2136  }
2137 
2138  init_output_stream_wrapper(ost, NULL, 1);
2139  }
2140 
2141  if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
2142  continue;
2143 
2144  for (;;) {
2145  const char *desc = NULL;
2146  AVPacket pkt;
2147  int pkt_size;
2148 
2149  switch (enc->codec_type) {
2150  case AVMEDIA_TYPE_AUDIO:
2151  desc = "audio";
2152  break;
2153  case AVMEDIA_TYPE_VIDEO:
2154  desc = "video";
2155  break;
2156  default:
2157  av_assert0(0);
2158  }
2159 
2160  av_init_packet(&pkt);
2161  pkt.data = NULL;
2162  pkt.size = 0;
2163 
2164  update_benchmark(NULL);
2165 
2166  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
2167  ret = avcodec_send_frame(enc, NULL);
2168  if (ret < 0) {
2169  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2170  desc,
2171  av_err2str(ret));
2172  exit_program(1);
2173  }
2174  }
2175 
2176  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
2177  if (ret < 0 && ret != AVERROR_EOF) {
2178  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
2179  desc,
2180  av_err2str(ret));
2181  exit_program(1);
2182  }
2183  if (ost->logfile && enc->stats_out) {
2184  fprintf(ost->logfile, "%s", enc->stats_out);
2185  }
2186  if (ret == AVERROR_EOF) {
2187  output_packet(of, &pkt, ost, 1);
2188  break;
2189  }
2190  if (ost->finished & MUXER_FINISHED) {
2191  av_packet_unref(&pkt);
2192  continue;
2193  }
2194  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
2195  pkt_size = pkt.size;
2196  output_packet(of, &pkt, ost, 0);
2197  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
2198  do_video_stats(ost, pkt_size);
2199  }
2200  }
2201  }
2202 }
2203 
2204 /*
2205  * Check whether a packet from ist should be written into ost at this time
2206  */
2208 {
2209  OutputFile *of = output_files[ost->file_index];
2210  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
2211 
2212  if (ost->source_index != ist_index)
2213  return 0;
2214 
2215  if (ost->finished)
2216  return 0;
2217 
2218  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2219  return 0;
2220 
2221  return 1;
2222 }
2223 
2224 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
2225 {
2226  OutputFile *of = output_files[ost->file_index];
2227  InputFile *f = input_files [ist->file_index];
2228  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
2229  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
2230  AVPacket opkt;
2231 
2232  // EOF: flush output bitstream filters.
2233  if (!pkt) {
2234  av_init_packet(&opkt);
2235  opkt.data = NULL;
2236  opkt.size = 0;
2237  output_packet(of, &opkt, ost, 1);
2238  return;
2239  }
2240 
2241  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2243  return;
2244 
2245  if (!ost->frame_number && !ost->copy_prior_start) {
2246  int64_t comp_start = start_time;
2247  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2248  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2249  if (pkt->pts == AV_NOPTS_VALUE ?
2250  ist->pts < comp_start :
2251  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2252  return;
2253  }
2254 
2255  if (of->recording_time != INT64_MAX &&
2256  ist->pts >= of->recording_time + start_time) {
2257  close_output_stream(ost);
2258  return;
2259  }
2260 
2261  if (f->recording_time != INT64_MAX) {
2262  start_time = f->ctx->start_time;
2263  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2264  start_time += f->start_time;
2265  if (ist->pts >= f->recording_time + start_time) {
2266  close_output_stream(ost);
2267  return;
2268  }
2269  }
2270 
2271  /* force the input stream PTS */
2272  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2273  ost->sync_opts++;
2274 
2275  if (av_packet_ref(&opkt, pkt) < 0)
2276  exit_program(1);
2277 
2278  if (pkt->pts != AV_NOPTS_VALUE)
2279  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2280 
2281  if (pkt->dts == AV_NOPTS_VALUE) {
2282  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2283  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2284  int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
2285  if(!duration)
2286  duration = ist->dec_ctx->frame_size;
2287  opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2288  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2290  /* dts will be set immediately afterwards to what pts is now */
2291  opkt.pts = opkt.dts - ost_tb_start_time;
2292  } else
2293  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2294  opkt.dts -= ost_tb_start_time;
2295 
2296  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2297 
2298  output_packet(of, &opkt, ost, 0);
2299 }
2300 
2302 {
2303  AVCodecContext *dec = ist->dec_ctx;
2304 
2305  if (!dec->channel_layout) {
2306  char layout_name[256];
2307 
2308  if (dec->channels > ist->guess_layout_max)
2309  return 0;
2310  dec->channel_layout = av_get_default_channel_layout(dec->channels);
2311  if (!dec->channel_layout)
2312  return 0;
2313  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2314  dec->channels, dec->channel_layout);
2315  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2316  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2317  }
2318  return 1;
2319 }
2320 
2321 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2322 {
2323  if (*got_output || ret<0)
2324  decode_error_stat[ret<0] ++;
2325 
2326  if (ret < 0 && exit_on_error)
2327  exit_program(1);
2328 
2329  if (*got_output && ist) {
2330  if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
2331  av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
2332  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2333  if (exit_on_error)
2334  exit_program(1);
2335  }
2336  }
2337 }
2338 
2339 // Filters can be configured only if the formats of all inputs are known.
2341 {
2342  int i;
2343  for (i = 0; i < fg->nb_inputs; i++) {
2344  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2345  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2346  return 0;
2347  }
2348  return 1;
2349 }
2350 
/* Feed one decoded frame into a filtergraph input.
 * If the frame's parameters differ from what the graph was configured with,
 * the graph is reconfigured; while some sibling input's format is still
 * unknown, the frame is buffered in the input's FIFO instead.
 * Returns 0 on success, a negative AVERROR on failure. */
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
{
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret, i;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
                       ifilter->channels       != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width  != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    /* Once the graph exists, only reinit if the user allowed it. */
    if (!ifilter->ist->reinit_filters && fg->graph)
        need_reinit = 0;

    /* A change in hw-frames context always forces a reinit. */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        for (i = 0; i < fg->nb_inputs; i++) {
            if (!ifilter_has_all_input_formats(fg)) {
                /* Can't configure yet: queue a clone of the frame on this
                 * input's FIFO, growing the FIFO if it is full. */
                AVFrame *tmp = av_frame_clone(frame);
                if (!tmp)
                    return AVERROR(ENOMEM);
                av_frame_unref(frame);

                if (!av_fifo_space(ifilter->frame_queue)) {
                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                    if (ret < 0) {
                        av_frame_free(&tmp);
                        return ret;
                    }
                }
                /* The FIFO stores AVFrame pointers, not frame data. */
                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
                return 0;
            }
        }

        /* Drain already-filtered frames before tearing the graph down. */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2427 
2428 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2429 {
2430  int ret = 0;
2431 
2432  ifilter->eof = 1;
2433 
2434  if (ifilter->filter) {
2435 
2436  /* THIS VALIDATION IS REQUIRED TO COMPLETE CANCELLATION */
2438  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2439  }
2440  if (ret < 0)
2441  return ret;
2442  } else {
2443  // the filtergraph was never configured
2444  if (ifilter->format < 0)
2445  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2446  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2447  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2448  return AVERROR_INVALIDDATA;
2449  }
2450  }
2451 
2452  return 0;
2453 }
2454 
2455 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2456 // There is the following difference: if you got a frame, you must call
2457 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2458 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2459 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2460 {
2461  int ret;
2462 
2463  *got_frame = 0;
2464 
2465  if (pkt) {
2466  ret = avcodec_send_packet(avctx, pkt);
2467  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2468  // decoded frames with avcodec_receive_frame() until done.
2469  if (ret < 0 && ret != AVERROR_EOF)
2470  return ret;
2471  }
2472 
2473  ret = avcodec_receive_frame(avctx, frame);
2474  if (ret < 0 && ret != AVERROR(EAGAIN))
2475  return ret;
2476  if (ret >= 0)
2477  *got_frame = 1;
2478 
2479  return 0;
2480 }
2481 
2483 {
2484  int i, ret;
2485  AVFrame *f;
2486 
2487  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
2488  for (i = 0; i < ist->nb_filters; i++) {
2489  if (i < ist->nb_filters - 1) {
2490  f = ist->filter_frame;
2491  ret = av_frame_ref(f, decoded_frame);
2492  if (ret < 0)
2493  break;
2494  } else
2495  f = decoded_frame;
2496  ret = ifilter_send_frame(ist->filters[i], f);
2497  if (ret == AVERROR_EOF)
2498  ret = 0; /* ignore */
2499  if (ret < 0) {
2500  av_log(NULL, AV_LOG_ERROR,
2501  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
2502  break;
2503  }
2504  }
2505  return ret;
2506 }
2507 
2508 static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
2509  int *decode_failed)
2510 {
2511  AVFrame *decoded_frame;
2512  AVCodecContext *avctx = ist->dec_ctx;
2513  int ret, err = 0;
2514  AVRational decoded_frame_tb;
2515 
2516  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2517  return AVERROR(ENOMEM);
2518  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2519  return AVERROR(ENOMEM);
2521 
2522  update_benchmark(NULL);
2523  ret = decode(avctx, decoded_frame, got_output, pkt);
2524  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
2525  if (ret < 0)
2526  *decode_failed = 1;
2527 
2528  if (ret >= 0 && avctx->sample_rate <= 0) {
2529  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
2530  ret = AVERROR_INVALIDDATA;
2531  }
2532 
2533  if (ret != AVERROR_EOF)
2535 
2536  if (!*got_output || ret < 0)
2537  return ret;
2538 
2539  ist->samples_decoded += decoded_frame->nb_samples;
2540  ist->frames_decoded++;
2541 
2542  /* increment next_dts to use for the case where the input stream does not
2543  have timestamps or there are multiple frames in the packet */
2544  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2545  avctx->sample_rate;
2546  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
2547  avctx->sample_rate;
2548 
2549  if (decoded_frame->pts != AV_NOPTS_VALUE) {
2550  decoded_frame_tb = ist->st->time_base;
2551  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
2552  decoded_frame->pts = pkt->pts;
2553  decoded_frame_tb = ist->st->time_base;
2554  }else {
2555  decoded_frame->pts = ist->dts;
2556  decoded_frame_tb = AV_TIME_BASE_Q;
2557  }
2558  if (decoded_frame->pts != AV_NOPTS_VALUE)
2559  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
2560  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
2561  (AVRational){1, avctx->sample_rate});
2562  ist->nb_samples = decoded_frame->nb_samples;
2564 
2565  av_frame_unref(ist->filter_frame);
2566  av_frame_unref(decoded_frame);
2567  return err < 0 ? err : ret;
2568 }
2569 
2570 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
2571  int *decode_failed)
2572 {
2573  AVFrame *decoded_frame;
2574  int i, ret = 0, err = 0;
2575  int64_t best_effort_timestamp;
2576  int64_t dts = AV_NOPTS_VALUE;
2577  AVPacket avpkt;
2578 
2579  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
2580  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
2581  // skip the packet.
2582  if (!eof && pkt && pkt->size == 0)
2583  return 0;
2584 
2585  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
2586  return AVERROR(ENOMEM);
2587  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
2588  return AVERROR(ENOMEM);
2590  if (ist->dts != AV_NOPTS_VALUE)
2591  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
2592  if (pkt) {
2593  avpkt = *pkt;
2594  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
2595  }
2596 
2597  // The old code used to set dts on the drain packet, which does not work
2598  // with the new API anymore.
2599  if (eof) {
2600  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
2601  if (!new)
2602  return AVERROR(ENOMEM);
2603  ist->dts_buffer = new;
2604  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
2605  }
2606 
2607  update_benchmark(NULL);
2608  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
2609  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
2610  if (ret < 0)
2611  *decode_failed = 1;
2612 
2613  // The following line may be required in some cases where there is no parser
2614  // or the parser does not has_b_frames correctly
2615  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
2616  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
2617  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
2618  } else
2619  av_log(ist->dec_ctx, AV_LOG_WARNING,
2620  "video_delay is larger in decoder than demuxer %d > %d.\n"
2621  "If you want to help, upload a sample "
2622  "of this file to https://streams.videolan.org/upload/ "
2623  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
2624  ist->dec_ctx->has_b_frames,
2625  ist->st->codecpar->video_delay);
2626  }
2627 
2628  if (ret != AVERROR_EOF)
2630 
2631  if (*got_output && ret >= 0) {
2632  if (ist->dec_ctx->width != decoded_frame->width ||
2633  ist->dec_ctx->height != decoded_frame->height ||
2634  ist->dec_ctx->pix_fmt != decoded_frame->format) {
2635  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
2636  decoded_frame->width,
2637  decoded_frame->height,
2638  decoded_frame->format,
2639  ist->dec_ctx->width,
2640  ist->dec_ctx->height,
2641  ist->dec_ctx->pix_fmt);
2642  }
2643  }
2644 
2645  if (!*got_output || ret < 0)
2646  return ret;
2647 
2648  if(ist->top_field_first>=0)
2649  decoded_frame->top_field_first = ist->top_field_first;
2650 
2651  ist->frames_decoded++;
2652 
2653  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
2654  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
2655  if (err < 0)
2656  goto fail;
2657  }
2659 
2660  best_effort_timestamp= decoded_frame->best_effort_timestamp;
2661  *duration_pts = decoded_frame->pkt_duration;
2662 
2663  if (ist->framerate.num)
2664  best_effort_timestamp = ist->cfr_next_pts++;
2665 
2666  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
2667  best_effort_timestamp = ist->dts_buffer[0];
2668 
2669  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
2670  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
2671  ist->nb_dts_buffer--;
2672  }
2673 
2674  if(best_effort_timestamp != AV_NOPTS_VALUE) {
2675  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
2676 
2677  if (ts != AV_NOPTS_VALUE)
2678  ist->next_pts = ist->pts = ts;
2679  }
2680 
2681  if (debug_ts) {
2682  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
2683  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
2684  ist->st->index, av_ts2str(decoded_frame->pts),
2685  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
2686  best_effort_timestamp,
2687  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
2688  decoded_frame->key_frame, decoded_frame->pict_type,
2689  ist->st->time_base.num, ist->st->time_base.den);
2690  }
2691 
2692  if (ist->st->sample_aspect_ratio.num)
2693  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
2694 
2696 
2697 fail:
2698  av_frame_unref(ist->filter_frame);
2699  av_frame_unref(decoded_frame);
2700  return err < 0 ? err : ret;
2701 }
2702 
2703 static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
2704  int *decode_failed)
2705 {
2706  AVSubtitle subtitle;
2707  int free_sub = 1;
2708  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
2709  &subtitle, got_output, pkt);
2710 
2712 
2713  if (ret < 0 || !*got_output) {
2714  *decode_failed = 1;
2715  if (!pkt->size)
2716  sub2video_flush(ist);
2717  return ret;
2718  }
2719 
2720  if (ist->fix_sub_duration) {
2721  int end = 1;
2722  if (ist->prev_sub.got_output) {
2723  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
2724  1000, AV_TIME_BASE);
2725  if (end < ist->prev_sub.subtitle.end_display_time) {
2726  av_log(ist->dec_ctx, AV_LOG_DEBUG,
2727  "Subtitle duration reduced from %"PRId32" to %d%s\n",
2728  ist->prev_sub.subtitle.end_display_time, end,
2729  end <= 0 ? ", dropping it" : "");
2730  ist->prev_sub.subtitle.end_display_time = end;
2731  }
2732  }
2733  FFSWAP(int, *got_output, ist->prev_sub.got_output);
2734  FFSWAP(int, ret, ist->prev_sub.ret);
2735  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
2736  if (end <= 0)
2737  goto out;
2738  }
2739 
2740  if (!*got_output)
2741  return ret;
2742 
2743  if (ist->sub2video.frame) {
2744  sub2video_update(ist, INT64_MIN, &subtitle);
2745  } else if (ist->nb_filters) {
2746  if (!ist->sub2video.sub_queue)
2747  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
2748  if (!ist->sub2video.sub_queue)
2749  exit_program(1);
2750  if (!av_fifo_space(ist->sub2video.sub_queue)) {
2751  ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
2752  if (ret < 0)
2753  exit_program(1);
2754  }
2755  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
2756  free_sub = 0;
2757  }
2758 
2759  if (!subtitle.num_rects)
2760  goto out;
2761 
2762  ist->frames_decoded++;
2763 
2764  for (i = 0; i < nb_output_streams; i++) {
2765  OutputStream *ost = output_streams[i];
2766 
2767  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
2768  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
2769  continue;
2770 
2772  }
2773 
2774 out:
2775  if (free_sub)
2776  avsubtitle_free(&subtitle);
2777  return ret;
2778 }
2779 
2781 {
2782  int i, ret;
2783  /* TODO keep pts also in stream time base to avoid converting back */
2784  int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
2785  AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
2786 
2787  for (i = 0; i < ist->nb_filters; i++) {
2788  ret = ifilter_send_eof(ist->filters[i], pts);
2789  if (ret < 0)
2790  return ret;
2791  }
2792  return 0;
2793 }
2794 
/* pkt = NULL means EOF (needed to flush decoder buffers)
 *
 * Central per-packet dispatcher: decodes the packet (audio/video/subtitle)
 * when decoding is needed, maintains the stream's pts/dts bookkeeping for
 * both decode and stream-copy paths, sends filter EOF after draining, and
 * finally stream-copies the packet into every matching output.
 * Returns 0 when the decoder reached EOF, non-zero otherwise. */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    int ret = 0, i;
    int repeating = 0;
    int eof_reached = 0;

    AVPacket avpkt;
    if (!ist->saw_first_ts) {
        /* First packet: derive an initial dts from the decoder delay. */
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (!pkt) {
        /* EOF handling */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
    } else {
        avpkt = *pkt;
    }

    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int64_t duration_dts = 0;
        int64_t duration_pts = 0;
        int got_output = 0;
        int decode_failed = 0;

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            /* Pass NULL on repeat iterations: we are only draining output. */
            ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output,
                                   &decode_failed);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
                                   &decode_failed);
            if (!repeating || !pkt || got_output) {
                /* Estimate the frame duration for dts advancement: prefer the
                 * packet duration, fall back to the codec frame rate. */
                if (pkt && pkt->duration) {
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                    duration_dts = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                }

                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
                    ist->next_dts += duration_dts;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            if (got_output) {
                if (duration_pts > 0) {
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
                } else {
                    ist->next_pts += duration_dts;
                }
            }
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (repeating)
                break;
            ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
            if (!pkt && ret >= 0)
                ret = AVERROR_EOF;
            break;
        default:
            return -1;
        }

        if (ret == AVERROR_EOF) {
            eof_reached = 1;
            break;
        }

        if (ret < 0) {
            if (decode_failed) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
            } else {
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
            }
            if (!decode_failed || exit_on_error)
                exit_program(1);
            break;
        }

        if (got_output)
            ist->got_output = 1;

        if (!got_output)
            break;

        // During draining, we might get multiple output frames in this loop.
        // ffmpeg.c does not drain the filter chain on configuration changes,
        // which means if we send multiple frames at once to the filters, and
        // one of those frames changes configuration, the buffered frames will
        // be lost. This can upset certain FATE tests.
        // Decode only 1 frame per call on EOF to appease these FATE tests.
        // The ideal solution would be to rewrite decoding to use the new
        // decoding API in a better way.
        if (!pkt)
            break;

        repeating = 1;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed && pkt) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            av_assert1(pkt->duration >= 0);
            if (ist->dec_ctx->sample_rate) {
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                                  ist->dec_ctx->sample_rate;
            } else {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    /* Stream-copy the packet into every eligible output stream. */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return !eof_reached;
}
2977 
2978 static void print_sdp(void)
2979 {
2980  char sdp[16384];
2981  int i;
2982  int j;
2983  AVIOContext *sdp_pb;
2984  AVFormatContext **avc;
2985 
2986  for (i = 0; i < nb_output_files; i++) {
2987  if (!output_files[i]->header_written)
2988  return;
2989  }
2990 
2991  avc = av_malloc_array(nb_output_files, sizeof(*avc));
2992  if (!avc)
2993  exit_program(1);
2994  for (i = 0, j = 0; i < nb_output_files; i++) {
2995  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2996  avc[j] = output_files[i]->ctx;
2997  j++;
2998  }
2999  }
3000 
3001  if (!j)
3002  goto fail;
3003 
3004  av_sdp_create(avc, j, sdp, sizeof(sdp));
3005 
3006  if (!sdp_filename) {
3007  av_log(NULL, AV_LOG_STDERR, "SDP:\n%s\n", sdp);
3008  fflush(stdout);
3009  } else {
3010  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
3011  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
3012  } else {
3013  avio_print(sdp_pb, sdp);
3014  avio_closep(&sdp_pb);
3015  av_freep(&sdp_filename);
3016  }
3017  }
3018 
3019 fail:
3020  av_freep(&avc);
3021 }
3022 
/**
 * AVCodecContext.get_format callback: walk the decoder's candidate pixel
 * format list and pick the first one we can use, preferring a hwaccel
 * format that matches the user's hwaccel selection. The loop exits on the
 * first non-hwaccel candidate (candidates are ordered hwaccel-first), and
 * the value at *p on exit is what gets returned.
 */
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const AVCodecHWConfig *config = NULL;
        int i;

        /* First software format terminates the search. */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        if (ist->hwaccel_id == HWACCEL_GENERIC ||
            ist->hwaccel_id == HWACCEL_AUTO) {
            /* Scan the codec's hw configs for a device-context-based entry
             * advertising this pixel format; config stays NULL if none. */
            for (i = 0;; i++) {
                config = avcodec_get_hw_config(s->codec, i);
                if (!config)
                    break;
                if (!(config->methods &
                      AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
                    continue;
                if (config->pix_fmt == *p)
                    break;
            }
        }
        if (config) {
            if (config->device_type != ist->hwaccel_device_type) {
                // Different hwaccel offered, ignore.
                continue;
            }

            ret = hwaccel_decode_init(s);
            if (ret < 0) {
                /* An explicitly requested generic hwaccel that cannot
                 * initialize is fatal; HWACCEL_AUTO just tries the next
                 * candidate format. */
                if (ist->hwaccel_id == HWACCEL_GENERIC) {
                    av_log(NULL, AV_LOG_FATAL,
                           "%s hwaccel requested for input stream #%d:%d, "
                           "but cannot be initialized.\n",
                           av_hwdevice_get_type_name(config->device_type),
                           ist->file_index, ist->st->index);
                    return AV_PIX_FMT_NONE;
                }
                continue;
            }
        } else {
            /* No codec-level hw config: fall back to the static hwaccels[]
             * table keyed by pixel format. */
            const HWAccel *hwaccel = NULL;
            int i;
            for (i = 0; hwaccels[i].name; i++) {
                if (hwaccels[i].pix_fmt == *p) {
                    hwaccel = &hwaccels[i];
                    break;
                }
            }
            if (!hwaccel) {
                // No hwaccel supporting this pixfmt.
                continue;
            }
            if (hwaccel->id != ist->hwaccel_id) {
                // Does not match requested hwaccel.
                continue;
            }

            ret = hwaccel->init(s);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL,
                       "%s hwaccel requested for input stream #%d:%d, "
                       "but cannot be initialized.\n", hwaccel->name,
                       ist->file_index, ist->st->index);
                return AV_PIX_FMT_NONE;
            }
        }

        /* Propagate a pre-existing hw frames context to the decoder. */
        if (ist->hw_frames_ctx) {
            s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
            if (!s->hw_frames_ctx)
                return AV_PIX_FMT_NONE;
        }

        ist->hwaccel_pix_fmt = *p;
        break;
    }

    return *p;
}
3108 
3109 static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
3110 {
3111  InputStream *ist = s->opaque;
3112 
3113  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
3114  return ist->hwaccel_get_buffer(s, frame, flags);
3115 
3116  return avcodec_default_get_buffer2(s, frame, flags);
3117 }
3118 
/**
 * Open the decoder (when decoding is needed) for input stream ist_index and
 * apply per-stream decoder options. On failure, writes a human-readable
 * message into error[error_len] and returns a negative AVERROR code.
 */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                     avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        /* Wire the per-stream callbacks; opaque lets them find the InputStream. */
        ist->dec_ctx->opaque = ist;
        ist->dec_ctx->get_format = get_format;
        ist->dec_ctx->get_buffer2 = get_buffer;
#if LIBAVCODEC_VERSION_MAJOR < 60
        ist->dec_ctx->thread_safe_callbacks = 1;
#endif

        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
            (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);

        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        ist->dec_ctx->pkt_timebase = ist->st->time_base;

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
        if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);

        /* NOTE(review): `ret` is tested below but no assignment is visible
         * above in this excerpt; a hw-device setup call (upstream:
         * hw_device_setup_for_decode(ist)) appears to have been lost during
         * extraction — confirm against the original file. */
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "decoder on input stream #%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
    }

    /* Timestamps are re-derived from the first decoded/copied packet. */
    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
3185 
{
    /* NOTE(review): the signature line is missing from this excerpt
     * (upstream: static InputStream *get_input_stream(OutputStream *ost)).
     * Returns the input stream feeding this output stream, or NULL when the
     * output has no direct source (source_index < 0, e.g. pure filter
     * outputs). */
    if (ost->source_index >= 0)
        return input_streams[ost->source_index];
    return NULL;
}
3192 
/**
 * qsort(3) comparator for int64_t values: returns a negative, zero, or
 * positive result for ascending order.
 */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    /* Same contract as FFDIFFSIGN(lhs, rhs); avoids the overflow that a
     * plain lhs - rhs subtraction could produce. */
    return (lhs > rhs) - (lhs < rhs);
}
3197 
/* open the muxer when all the streams are initialized */
/* NOTE(review): the signature line is missing from this excerpt (upstream:
 * static int check_init_output_file(OutputFile *of, int file_index)).
 * Writes the header once every stream of `of` is initialized, then drains
 * packets queued while the muxer was not yet open. Returns 0 on success or
 * when streams are still pending; negative AVERROR on failure. */
{
    int ret, i;

    /* Bail out (successfully) while any stream is still uninitialized. */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];
        if (!ost->initialized)
            return 0;
    }

    of->ctx->interrupt_callback = int_cb;

    ret = avformat_write_header(of->ctx, &of->opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Could not write header for output file #%d "
               "(incorrect codec parameters ?): %s\n",
               file_index, av_err2str(ret));
        return ret;
    }
    //assert_avoptions(of->opts);
    of->header_written = 1;

    av_dump_format(of->ctx, file_index, of->ctx->url, 1);
    nb_output_dumped++;

    if (sdp_filename || want_sdp)
        print_sdp();

    /* flush the muxing queues */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];

        /* try to improve muxing time_base (only possible if nothing has been written yet) */
        if (!av_fifo_size(ost->muxing_queue))
            ost->mux_timebase = ost->st->time_base;

        while (av_fifo_size(ost->muxing_queue)) {
            AVPacket pkt;
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            ost->muxing_queue_data_size -= pkt.size;
            write_packet(of, &pkt, ost, 1);
        }
    }

    return 0;
}
3246 
{
    /* NOTE(review): the signature line is missing from this excerpt
     * (upstream: static int init_output_bsfs(OutputStream *ost)).
     * Initializes the output stream's bitstream filter chain: feeds the
     * stream's codec parameters and time base in, and copies the filter's
     * output parameters/time base back onto the stream. Returns 0 on
     * success (including when no bsf is attached), negative AVERROR on
     * failure. */
    AVBSFContext *ctx = ost->bsf_ctx;
    int ret;

    if (!ctx)
        return 0;

    ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
    if (ret < 0)
        return ret;

    ctx->time_base_in = ost->st->time_base;

    ret = av_bsf_init(ctx);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
               ctx->filter->name);
        return ret;
    }

    /* The bsf may rewrite extradata/codec parameters and the time base. */
    ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
    if (ret < 0)
        return ret;

    ost->st->time_base = ctx->time_base_out;

    return 0;
}
3276 
{
    /* NOTE(review): the signature line is missing from this excerpt
     * (upstream: static int init_output_stream_streamcopy(OutputStream *ost)).
     * Initializes an output stream in stream-copy mode: copies codec
     * parameters from the input stream (through enc_ctx so that -c copy
     * options still apply), resolves the codec tag for the target
     * container, and carries over timing, disposition, and side data.
     * Returns 0 on success, negative AVERROR on failure. */
    OutputFile *of = output_files[ost->file_index];
    InputStream *ist = get_input_stream(ost);
    AVCodecParameters *par_dst = ost->st->codecpar;
    AVCodecParameters *par_src = ost->ref_par;
    AVRational sar;
    int i, ret;
    uint32_t codec_tag = par_dst->codec_tag;

    /* Stream copy requires a direct input stream and no filter graph. */
    av_assert0(ist && !ost->filter);

    ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
    if (ret >= 0)
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error setting up codec context options.\n");
        return ret;
    }

    ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error getting reference codec parameters.\n");
        return ret;
    }

    /* Keep the source codec tag only when the target container either has
     * no tag table, maps the source tag back to the same codec id, or has
     * no tag of its own for this codec. */
    if (!codec_tag) {
        unsigned int codec_tag_tmp;
        if (!of->ctx->oformat->codec_tag ||
            av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
            !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
            codec_tag = par_src->codec_tag;
    }

    ret = avcodec_parameters_copy(par_dst, par_src);
    if (ret < 0)
        return ret;

    par_dst->codec_tag = codec_tag;

    if (!ost->frame_rate.num)
        ost->frame_rate = ist->framerate;
    ost->st->avg_frame_rate = ost->frame_rate;

    ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
    if (ret < 0)
        return ret;

    // copy timebase while removing common factors
    if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
        ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});

    // copy estimated duration as a hint to the muxer
    if (ost->st->duration <= 0 && ist->st->duration > 0)
        ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

    // copy disposition
    ost->st->disposition = ist->st->disposition;

    if (ist->st->nb_side_data) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
            uint8_t *dst_data;

            dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
            if (!dst_data)
                return AVERROR(ENOMEM);
            memcpy(dst_data, sd_src->data, sd_src->size);
        }
    }

    /* A user-supplied rotation replaces whatever display matrix the input
     * may have carried; failure to allocate the side data is ignored. */
    if (ost->rotate_overridden) {
        uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
                                              sizeof(int32_t) * 9);
        if (sd)
            av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
    }

    switch (par_dst->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (audio_volume != 256) {
            av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
            exit_program(1);
        }
        /* These block_align values are parser artifacts for MP3/AC3 and
         * would confuse the muxer; clear them. */
        if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
            par_dst->block_align= 0;
        if(par_dst->codec_id == AV_CODEC_ID_AC3)
            par_dst->block_align= 0;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
            sar =
                av_mul_q(ost->frame_aspect_ratio,
                         (AVRational){ par_dst->height, par_dst->width });
            av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                   "with stream copy may produce invalid files\n");
        }
        else if (ist->st->sample_aspect_ratio.num)
            sar = ist->st->sample_aspect_ratio;
        else
            sar = par_src->sample_aspect_ratio;
        ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;
        ost->st->r_frame_rate = ist->st->r_frame_rate;
        break;
    }

    ost->mux_timebase = ist->st->time_base;

    return 0;
}
3390 
{
    /* NOTE(review): the signature line is missing from this excerpt
     * (upstream: static void set_encoder_id(OutputFile *of, OutputStream *ost)).
     * Writes an "encoder" metadata tag ("Lavc... <codec>") onto the output
     * stream unless one already exists, or bitexact mode suppresses the
     * full library identifier. */
    AVDictionaryEntry *e;

    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = ost->enc_ctx->flags;

    /* A user-provided encoder tag wins. */
    if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
        return;

    /* Evaluate pending -fflags / -flags option strings so a bitexact
     * request set via options (not yet applied to the contexts) is seen. */
    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
    }

    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);

    /* Bitexact output must not embed the version-specific identifier. */
    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    else
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    /* DONT_STRDUP_VAL hands ownership of encoder_string to the dict. */
    av_dict_set(&ost->st->metadata, "encoder", encoder_string,
                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
}
3431 
3432 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3433  AVCodecContext *avctx)
3434 {
3435  char *p;
3436  int n = 1, i, size, index = 0;
3437  int64_t t, *pts;
3438 
3439  for (p = kf; *p; p++)
3440  if (*p == ',')
3441  n++;
3442  size = n;
3443  pts = av_malloc_array(size, sizeof(*pts));
3444  if (!pts) {
3445  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3446  exit_program(1);
3447  }
3448 
3449  p = kf;
3450  for (i = 0; i < n; i++) {
3451  char *next = strchr(p, ',');
3452 
3453  if (next)
3454  *next++ = 0;
3455 
3456  if (!memcmp(p, "chapters", 8)) {
3457 
3458  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3459  int j;
3460 
3461  if (avf->nb_chapters > INT_MAX - size ||
3462  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3463  sizeof(*pts)))) {
3464  av_log(NULL, AV_LOG_FATAL,
3465  "Could not allocate forced key frames array.\n");
3466  exit_program(1);
3467  }
3468  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3469  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3470 
3471  for (j = 0; j < avf->nb_chapters; j++) {
3472  AVChapter *c = avf->chapters[j];
3473  av_assert1(index < size);
3474  pts[index++] = av_rescale_q(c->start, c->time_base,
3475  avctx->time_base) + t;
3476  }
3477 
3478  } else {
3479 
3480  t = parse_time_or_die("force_key_frames", p, 1);
3481  av_assert1(index < size);
3482  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3483 
3484  }
3485 
3486  p = next;
3487  }
3488 
3489  av_assert0(index == size);
3490  qsort(pts, size, sizeof(*pts), compare_int64);
3491  ost->forced_kf_count = size;
3492  ost->forced_kf_pts = pts;
3493 }
3494 
3495 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3496 {
3497  InputStream *ist = get_input_stream(ost);
3498  AVCodecContext *enc_ctx = ost->enc_ctx;
3499  AVFormatContext *oc;
3500 
3501  if (ost->enc_timebase.num > 0) {
3502  enc_ctx->time_base = ost->enc_timebase;
3503  return;
3504  }
3505 
3506  if (ost->enc_timebase.num < 0) {
3507  if (ist) {
3508  enc_ctx->time_base = ist->st->time_base;
3509  return;
3510  }
3511 
3512  oc = output_files[ost->file_index]->ctx;
3513  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3514  }
3515 
3516  enc_ctx->time_base = default_time_base;
3517 }
3518 
/**
 * Configure the encoder context for an output stream from the filter graph
 * output (dimensions, formats, rates) and from the source stream where one
 * exists. `frame` may carry the first filtered frame, whose color/interlace
 * properties are propagated. Returns 0 on success, negative AVERROR on
 * failure.
 */
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVCodecContext *dec_ctx = NULL;
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    /* NOTE(review): a line appears to be missing from this excerpt here
     * (upstream calls set_encoder_id(output_files[ost->file_index], ost)) —
     * confirm against the original file. */

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* No source stream: mark the first audio/video stream of its kind
         * in this file as the default disposition. */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
                ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
                ost->st->disposition = AV_DISPOSITION_DEFAULT;
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Frame rate fallback chain: -r option, filter graph, input
         * framerate, input r_frame_rate, then a last-resort 25fps. */
        if (!ost->frame_rate.num)
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            av_log(NULL, AV_LOG_WARNING,
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                   "if you want a different framerate.\n",
                   ost->file_index, ost->index);
        }

        if (ost->max_frame_rate.num &&
            (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
             !ost->frame_rate.den))
            ost->frame_rate = ost->max_frame_rate;

        /* Snap to the nearest rate the encoder supports unless -force_fps. */
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
        enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
        enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
        enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        init_encoder_time_base(ost, av_inv_q(ost->frame_rate));

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
           && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }

        enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        /* Propagate color metadata from the first filtered frame. */
        if (frame) {
            enc_ctx->color_range = frame->color_range;
            enc_ctx->color_primaries = frame->color_primaries;
            enc_ctx->color_trc = frame->color_trc;
            enc_ctx->colorspace = frame->colorspace;
            enc_ctx->chroma_sample_location = frame->chroma_location;
        }

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        if (!dec_ctx ||
            enc_ctx->width != dec_ctx->width ||
            enc_ctx->height != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
            enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
        }

        /* -top 0/1 forces field order; -1 leaves it unset here. */
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (frame) {
            if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                frame->top_field_first = !!ost->top_field_first;

            if (frame->interlaced_frame) {
                if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                else
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            } else
                enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                    forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                /* NOTE(review): lines appear missing from this excerpt here
                 * (upstream initializes forced_keyframes_expr_const_values
                 * for the parsed expression) — confirm against the original
                 * file. */

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                /* NOTE(review): a line appears missing here (upstream calls
                 * parse_forced_key_frames(ost->forced_keyframes, ost,
                 * ost->enc_ctx)) — confirm against the original file. */
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        if (!enc_ctx->width) {
            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3699 
/**
 * Fully initialize one output stream: open the encoder (or set up stream
 * copy), apply user disposition flags, and initialize bitstream filters.
 * On failure, writes a message into error[error_len] and returns a negative
 * AVERROR code.
 */
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost, frame);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* Default audio bitrate for encoders that declare no defaults. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        /* NOTE(review): `ret` is tested below but no assignment is visible
         * above in this excerpt; a hw-device setup call (upstream:
         * hw_device_setup_for_encode(ost)) appears to have been lost during
         * extraction — confirm against the original file. */
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
            return ret;
        }

        /* Subtitle transcoding is only text->text or bitmap->bitmap. */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
                avcodec_descriptor_get(dec->codec_id);
            AVCodecDescriptor const *output_descriptor =
                avcodec_descriptor_get(ost->enc_ctx->codec_id);
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }

        /* Copy encoder-produced global side data onto the stream. */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    /* Autorotation happened in the filter graph, so neutralize
                     * the copied display matrix.
                     * NOTE(review): upstream casts to (int32_t *) here, not
                     * (uint32_t *) — confirm against the original file. */
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((uint32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
    } else if (ost->stream_copy) {
        /* NOTE(review): a line appears missing from this excerpt here
         * (upstream: ret = init_output_stream_streamcopy(ost)) — confirm
         * against the original file. */
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        static const AVOption opts[] = {
            { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, (double)INT64_MAX, .unit = "flags" },
            { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
            { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
            { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
            { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
            { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
            { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
            { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
            { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
            { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
            { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
            { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
            { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
            { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
            { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
            { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name = av_default_item_name,
            .option = opts,
            .version = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* NOTE(review): a line appears missing from this excerpt here
     * (upstream: ret = check_init_output_file(output_files[ost->file_index],
     * ost->file_index)) — confirm against the original file. */
    if (ret < 0)
        return ret;

    return ret;
}
3881 
3882 static void report_new_stream(int input_index, AVPacket *pkt)
3883 {
3884  InputFile *file = input_files[input_index];
3885  AVStream *st = file->ctx->streams[pkt->stream_index];
3886 
3887  if (pkt->stream_index < file->nb_streams_warn)
3888  return;
3889  av_log(file->ctx, AV_LOG_WARNING,
3890  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3891  av_get_media_type_string(st->codecpar->codec_type),
3892  input_index, pkt->stream_index,
3893  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3894  file->nb_streams_warn = pkt->stream_index + 1;
3895 }
3896 
static int transcode_init(void)
{
    /* One-time setup before the main transcode loop: bind filtergraph
     * outputs to source streams, initialize input/output streams, discard
     * unused programs and print the stream mapping. Returns 0 or <0. */
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* For complex-filtergraph outputs with no explicit source stream:
     * if the graph has exactly one input, attribute the output to that
     * input's stream (used for the mapping dump below). */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation: record a start wallclock per stream so
     * -re pacing can later compare DTS against elapsed real time */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams; on failure close every output encoder and fall
     * through to dump_format so the error context is still printed */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /*
     * initialize stream copy and subtitle/data streams.
     * Encoded AVFrame based streams will get initialized as follows:
     * - when the first AVFrame is received in do_video_out
     * - just before the first AVFrame is received in either transcode_step
     *   or reap_filters due to us requiring the filter chain buffer sink
     *   to be configured with the correct audio frame size, which is only
     *   known after the encoder is initialized.
     */
    for (i = 0; i < nb_output_streams; i++) {
        if (!output_streams[i]->stream_copy &&
            (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
             output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
            continue;

        ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs: keep a program only if at least one of its
     * member streams is not itself discarded */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard  = AVDISCARD_ALL;

            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = check_init_output_file(output_files[i], i);
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            /* input stream feeding a complex filtergraph */
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        /* plain input-stream -> output-stream mapping
         * NOTE(review): the file_index argument line was dropped by the doc
         * extraction; restored from upstream ffmpeg 4.4 */
        av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec   = ost->enc;
            const char *decoder_name   = "?";
            const char *in_codec_name  = "?";
            const char *encoder_name   = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name  = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                /* "native" means the decoder name equals the codec name */
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name   = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    atomic_store(&transcode_init_done, 1);

    return 0;
}
4077 
/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
static int need_output(void)
{
    int i;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost    = output_streams[i];
        OutputFile *of       = output_files[ost->file_index];
        AVFormatContext *os  = output_files[ost->file_index]->ctx;

        /* skip streams that are done, or whose file hit -fs (limit_filesize) */
        if (ost->finished ||
            (os->pb && avio_tell(os->pb) >= of->limit_filesize))
            continue;
        /* -frames reached: close every stream of this output file
         * NOTE(review): the close_output_stream line was dropped by the doc
         * extraction; restored from upstream ffmpeg 4.4 */
        if (ost->frame_number >= ost->max_frames) {
            int j;
            for (j = 0; j < of->ctx->nb_streams; j++)
                close_output_stream(output_streams[of->ost_index + j]);
            continue;
        }

        return 1;
    }

    return 0;
}
4103 
/**
 * Select the output stream to process.
 *
 * @return  selected output stream, or NULL if none available
 *
 * NOTE(review): the doc comment and signature above were dropped by the doc
 * extraction; restored from upstream ffmpeg 4.4.
 */
static OutputStream *choose_output(void)
{
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        /* muxing position in microseconds; streams with no DTS yet sort
         * first (INT64_MIN) so they receive data soonest */
        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->st->cur_dts == AV_NOPTS_VALUE)
            av_log(NULL, AV_LOG_DEBUG,
                "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
                ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);

        /* streams not yet initialized take absolute priority */
        if (!ost->initialized && !ost->inputs_done)
            return ost;

        /* otherwise pick the unfinished stream that is furthest behind;
         * an unavailable (EAGAIN) stream yields NULL instead of itself */
        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min  = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
4135 
static void set_tty_echo(int on)
{
    /* Toggle terminal echo on stdin; a no-op where termios is unavailable. */
#if HAVE_TERMIOS_H
    struct termios settings;

    if (tcgetattr(0, &settings) != 0)
        return;

    if (on)
        settings.c_lflag |= ECHO;
    else
        settings.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &settings);
#endif
}
4147 
static int check_keyboard_interaction(int64_t cur_time)
{
    /* Poll stdin for interactive commands. Returns AVERROR_EXIT to request
     * termination (signal received or 'q' pressed), 0 otherwise. */
    int i, ret, key;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    /* rate-limit polling to once every 100ms; never read keys as a daemon */
    if(cur_time - keyboard_last_time >= 100000 && !run_as_daemon){
        key =  read_key();
        keyboard_last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist     ^= 1;
    if (key == 'h'){
        /* cycle: off -> packet dump -> packet+hex dump -> off */
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        av_log_set_level(AV_LOG_DEBUG);
    }
    if (key == 'c' || key == 'C'){
        /* 'c' sends a command to the first matching filter, 'C' to all;
         * a line of the form "<target> <time> <command> [<arg>]" is read */
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        /* immediate command */
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        /* deferred command, executed at the given time */
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            av_log(NULL, AV_LOG_ERROR,
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            /* 'D': cycle to the next debug flag by doubling the current one */
            debug = input_streams[0]->dec_ctx->debug << 1;
            if(!debug) debug = 1;
            while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
                debug += debug;
        }else{
            /* 'd': read a numeric debug value from the terminal */
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        /* apply the new debug value to all decoders and encoders */
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->dec_ctx->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        /* NOTE(review): column-alignment spaces in this help text were
         * collapsed by the doc extraction; restored from upstream */
        fprintf(stderr, "key    function\n"
                        "?      show this help\n"
                        "+      increase verbosity\n"
                        "-      decrease verbosity\n"
                        "c      Send command to first matching filter supporting it\n"
                        "C      Send/Queue command to all matching filters\n"
                        "D      cycle through available debug modes\n"
                        "h      dump packets/hex press to cycle through the 3 states\n"
                        "q      quit\n"
                        "s      Show QP histogram\n"
        );
    }
    return 0;
}
4259 
4260 #if HAVE_THREADS
4261 static void *input_thread(void *arg)
4262 {
4263  InputFile *f = arg;
4264  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4265  int ret = 0;
4266 
4267  while (1) {
4268  AVPacket pkt;
4269  ret = av_read_frame(f->ctx, &pkt);
4270 
4271  if (ret == AVERROR(EAGAIN)) {
4272  av_usleep(10000);
4273  continue;
4274  }
4275  if (ret < 0) {
4276  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4277  break;
4278  }
4279  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4280  if (flags && ret == AVERROR(EAGAIN)) {
4281  flags = 0;
4282  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4283  av_log(f->ctx, AV_LOG_WARNING,
4284  "Thread message queue blocking; consider raising the "
4285  "thread_queue_size option (current value: %d)\n",
4286  f->thread_queue_size);
4287  }
4288  if (ret < 0) {
4289  if (ret != AVERROR_EOF)
4290  av_log(f->ctx, AV_LOG_ERROR,
4291  "Unable to send packet to main thread: %s\n",
4292  av_err2str(ret));
4293  av_packet_unref(&pkt);
4294  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4295  break;
4296  }
4297  }
4298 
4299  return NULL;
4300 }
4301 
4302 static void free_input_thread(int i)
4303 {
4304  InputFile *f = input_files[i];
4305  AVPacket pkt;
4306 
4307  if (!f || !f->in_thread_queue)
4308  return;
4309  av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4310  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4311  av_packet_unref(&pkt);
4312 
4313  pthread_join(f->thread, NULL);
4314  f->joined = 1;
4315  av_thread_message_queue_free(&f->in_thread_queue);
4316 }
4317 
4318 static void free_input_threads(void)
4319 {
4320  int i;
4321 
4322  for (i = 0; i < nb_input_files; i++)
4323  free_input_thread(i);
4324 }
4325 
4326 static int init_input_thread(int i)
4327 {
4328  int ret;
4329  InputFile *f = input_files[i];
4330 
4331  if (f->thread_queue_size < 0)
4332  f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4333  if (!f->thread_queue_size)
4334  return 0;
4335 
4336  if (f->ctx->pb ? !f->ctx->pb->seekable :
4337  strcmp(f->ctx->iformat->name, "lavfi"))
4338  f->non_blocking = 1;
4339  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4340  f->thread_queue_size, sizeof(AVPacket));
4341  if (ret < 0)
4342  return ret;
4343 
4344  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4345  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4346  av_thread_message_queue_free(&f->in_thread_queue);
4347  return AVERROR(ret);
4348  }
4349 
4350  return 0;
4351 }
4352 
4353 static int init_input_threads(void)
4354 {
4355  int i, ret;
4356 
4357  for (i = 0; i < nb_input_files; i++) {
4358  ret = init_input_thread(i);
4359  if (ret < 0)
4360  return ret;
4361  }
4362  return 0;
4363 }
4364 
4365 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4366 {
4367  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4368  f->non_blocking ?
4369  AV_THREAD_MESSAGE_NONBLOCK : 0);
4370 }
4371 #endif
4372 
4373 static int get_input_packet(InputFile *f, AVPacket *pkt)
4374 {
4375  if (f->rate_emu) {
4376  int i;
4377  for (i = 0; i < f->nb_streams; i++) {
4378  InputStream *ist = input_streams[f->ist_index + i];
4379  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4380  int64_t now = av_gettime_relative() - ist->start;
4381  if (pts > now)
4382  return AVERROR(EAGAIN);
4383  }
4384  }
4385 
4386 #if HAVE_THREADS
4387  if (f->thread_queue_size)
4388  return get_input_packet_mt(f, pkt);
4389 #endif
4390  return av_read_frame(f->ctx, pkt);
4391 }
4392 
4393 static int got_eagain(void)
4394 {
4395  int i;
4396  for (i = 0; i < nb_output_streams; i++)
4397  if (output_streams[i]->unavailable)
4398  return 1;
4399  return 0;
4400 }
4401 
4402 static void reset_eagain(void)
4403 {
4404  int i;
4405  for (i = 0; i < nb_input_files; i++)
4406  input_files[i]->eagain = 0;
4407  for (i = 0; i < nb_output_streams; i++)
4408  output_streams[i]->unavailable = 0;
4409 }
4410 
4411 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4412 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4413  AVRational time_base)
4414 {
4415  int ret;
4416 
4417  if (!*duration) {
4418  *duration = tmp;
4419  return tmp_time_base;
4420  }
4421 
4422  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4423  if (ret < 0) {
4424  *duration = tmp;
4425  return tmp_time_base;
4426  }
4427 
4428  return time_base;
4429 }
4430 
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
{
    /* Rewind the input file for -stream_loop and accumulate the file's
     * total duration so looped timestamps can be offset in process_input().
     * Returns the avformat_seek_file() result (0/<0). */
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
    if (ret < 0)
        return ret;

    /* first pass: detect whether any audio stream produced samples */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist   = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    /* second pass: compute each stream's total duration and keep the max */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist   = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                /* exact last-frame length from the decoded sample count */
                AVRational sample_rate = {1, avctx->sample_rate};

                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else {
                continue;
            }
        } else {
            /* no audio: estimate the last frame's length from the frame
             * rate, falling back to a single time-base tick */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
            } else {
                duration = 1;
            }
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
            duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    /* a positive loop count decrements; -1 loops forever */
    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4489 
/*
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket pkt;
    int ret, thread_ret, i, j;
    int64_t duration;
    int64_t pkt_dts;
    /* with -copyts, discontinuity correction is off unless re-enabled by
     * the wrap heuristic below */
    int disable_discontinuity_correction = copy_ts;

    is  = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    /* EOF/error with -stream_loop pending: flush decoders, seek back to the
     * start and try reading again */
    if (ret < 0 && ifile->loop) {
        AVCodecContext *avctx;
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            avctx = ist->dec_ctx;
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 1);
                if (ret>0)
                    return 0;
                avcodec_flush_buffers(avctx);
            }
        }
#if HAVE_THREADS
        /* the demuxing thread must be restarted around the seek */
        free_input_thread(file_index);
#endif
        ret = seek_to_start(ifile, is);
#if HAVE_THREADS
        thread_ret = init_input_thread(file_index);
        if (thread_ret < 0)
            return thread_ret;
#endif
        if (ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
        else
            ret = get_input_packet(ifile, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
    }
    if (ret < 0) {
        /* definitive EOF (or fatal error) for this input file */
        if (ret != AVERROR_EOF) {
            print_error(is->url, ret);
            if (exit_on_error)
                exit_program(1);
        }

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed) {
                /* send the flush packet to the decoder */
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    finish_output_stream(ost);
            }
        }

        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        av_pkt_dump_log2(NULL, AV_LOG_INFO, &pkt, do_hex_dump,
                         is->streams[pkt.stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt.stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, &pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt.stream_index];

    /* bookkeeping for -stats / reports */
    ist->data_size += pkt.size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
        av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
               "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
        if (exit_on_error)
            exit_program(1);
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        /* undo a timestamp wrap when a packet lies more than half the wrap
         * period past the start time; stay in wrap-correction mode until a
         * packet no longer needs adjusting */
        if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            /* don't overwrite side data the packet already carries */
            if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    /* apply the per-file timestamp offset (-itsoffset / discontinuity
     * compensation) and the per-stream -itsscale factor */
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt.pts != AV_NOPTS_VALUE)
        pkt.pts *= ist->ts_scale;
    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts *= ist->ts_scale;

    pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    /* inter-stream discontinuity: first packet of a stream jumps far from
     * the file's last seen timestamp in a TS_DISCONT format */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t delta = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
            ifile->ts_offset -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt.pts != AV_NOPTS_VALUE)
                pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    /* shift timestamps by the accumulated -stream_loop duration */
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt.pts != AV_NOPTS_VALUE) {
        pkt.pts += duration;
        ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
    }

    if (pkt.dts != AV_NOPTS_VALUE)
        pkt.dts += duration;

    pkt_dts = av_rescale_q_rnd(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);

    /* with -copyts, re-enable discontinuity correction only when the DTS
     * looks like a genuine wrap (the wrapped value lands much closer to the
     * expected next DTS than the raw one) */
    if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
        int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
                                            ist->st->time_base, AV_TIME_BASE_Q,
                                            AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
            disable_discontinuity_correction = 0;
    }

    /* intra-stream discontinuity: DTS jumps away from the predicted
     * next_dts. TS_DISCONT formats get the offset corrected; others have
     * implausible timestamps dropped instead. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !disable_discontinuity_correction) {
        int64_t delta = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity for stream #%d:%d "
                       "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
                       ist->file_index, ist->st->index, ist->st->id,
                       av_get_media_type_string(ist->dec_ctx->codec_type),
                       delta, ifile->ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
                pkt.dts = AV_NOPTS_VALUE;
            }
            if (pkt.pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                     delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
                    pkt.pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    /* remember the last seen timestamp for inter-stream discontinuity checks */
    if (pkt.dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt.stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
               av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt.pts);

    process_input_packet(ist, &pkt, 0);

discard_packet:
    av_packet_unref(&pkt);

    return 0;
}
4777 
/**
 * Perform a step of transcoding for the specified filter graph.
 *
 * @param[in]  graph     filter graph to consider
 * @param[out] best_ist  input stream where a frame would allow to continue
 * @return  0 for success, <0 for severe errors
 */
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
{
    int i, ret;
    int nb_requests, nb_requests_max = 0;
    InputFilter *ifilter;
    InputStream *ist;

    *best_ist = NULL;
    ret = avfilter_graph_request_oldest(graph->graph);
    if (ret >= 0)
        /* the graph produced output; drain it */
        return reap_filters(0);

    if (ret == AVERROR_EOF) {
        /* graph finished: flush and close all of its output streams */
        ret = reap_filters(1);
        for (i = 0; i < graph->nb_outputs; i++)
            close_output_stream(graph->outputs[i]->ost);
        return ret;
    }
    if (ret != AVERROR(EAGAIN))
        return ret;

    /* graph needs more input: pick the input stream whose buffersrc has
     * failed the most frame requests.
     * NOTE(review): the eof_reached half of this condition was dropped by
     * the doc extraction; restored from upstream ffmpeg 4.4 */
    for (i = 0; i < graph->nb_inputs; i++) {
        ifilter = graph->inputs[i];
        ist = ifilter->ist;
        if (input_files[ist->file_index]->eagain ||
            input_files[ist->file_index]->eof_reached)
            continue;
        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
        if (nb_requests > nb_requests_max) {
            nb_requests_max = nb_requests;
            *best_ist = ist;
        }
    }

    /* no feedable input: mark the graph's outputs unavailable for now */
    if (!*best_ist)
        for (i = 0; i < graph->nb_outputs; i++)
            graph->outputs[i]->ost->unavailable = 1;

    return 0;
}
4825 
4831 static int transcode_step(void)
4832 {
4833  OutputStream *ost;
4834  InputStream *ist = NULL;
4835  int ret;
4836 
4837  ost = choose_output();
4838  if (!ost) {
4839  if (got_eagain()) {
4840  reset_eagain();
4841  av_usleep(10000);
4842  return 0;
4843  }
4844  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4845  return AVERROR_EOF;
4846  }
4847 
4848  if (ost->filter && !ost->filter->graph->graph) {
4850  ret = configure_filtergraph(ost->filter->graph);
4851  if (ret < 0) {
4852  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4853  return ret;
4854  }
4855  }
4856  }
4857 
4858  if (ost->filter && ost->filter->graph->graph) {
4859  /*
4860  * Similar case to the early audio initialization in reap_filters.
4861  * Audio is special in ffmpeg.c currently as we depend on lavfi's
4862  * audio frame buffering/creation to get the output audio frame size
4863  * in samples correct. The audio frame size for the filter chain is
4864  * configured during the output stream initialization.
4865  *
4866  * Apparently avfilter_graph_request_oldest (called in
4867  * transcode_from_filter just down the line) peeks. Peeking already
4868  * puts one frame "ready to be given out", which means that any
4869  * update in filter buffer sink configuration afterwards will not
4870  * help us. And yes, even if it would be utilized,
4871  * av_buffersink_get_samples is affected, as it internally utilizes
4872  * the same early exit for peeked frames.
4873  *
4874  * In other words, if avfilter_graph_request_oldest would not make
4875  * further filter chain configuration or usage of
4876  * av_buffersink_get_samples useless (by just causing the return
4877  * of the peeked AVFrame as-is), we could get rid of this additional
4878  * early encoder initialization.
4879  */
4880  if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
4881  init_output_stream_wrapper(ost, NULL, 1);
4882 
4883  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4884  return ret;
4885  if (!ist)
4886  return 0;
4887  } else if (ost->filter) {
4888  int i;
4889  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4890  InputFilter *ifilter = ost->filter->graph->inputs[i];
4891  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4892  ist = ifilter->ist;
4893  break;
4894  }
4895  }
4896  if (!ist) {
4897  ost->inputs_done = 1;
4898  return 0;
4899  }
4900  } else {
4901  av_assert0(ost->source_index >= 0);
4902  ist = input_streams[ost->source_index];
4903  }
4904 
4905  ret = process_input(ist->file_index);
4906  if (ret == AVERROR(EAGAIN)) {
4907  if (input_files[ist->file_index]->eagain)
4908  ost->unavailable = 1;
4909  return 0;
4910  }
4911 
4912  if (ret < 0)
4913  return ret == AVERROR_EOF ? 0 : ret;
4914 
4915  return reap_filters(0);
4916 }
4917 
4918 /*
4919  * The following code is the main loop of the file converter
4920  */
4921 static int transcode(void)
4922 {
4923  int ret, i;
4924  AVFormatContext *os;
4925  OutputStream *ost;
4926  InputStream *ist;
4927  int64_t timer_start;
4928  int64_t total_packets_written = 0;
4929 
4930  ret = transcode_init();
4931  if (ret < 0)
4932  goto fail;
4933 
4934  if (stdin_interaction) {
4935  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4936  }
4937 
4938  timer_start = av_gettime_relative();
4939 
4940 #if HAVE_THREADS
4941  if ((ret = init_input_threads()) < 0)
4942  goto fail;
4943 #endif
4944 
4946  int64_t cur_time= av_gettime_relative();
4947 
4948  /* if 'q' pressed, exits */
4949  if (stdin_interaction)
4950  if (check_keyboard_interaction(cur_time) < 0)
4951  break;
4952 
4953  /* check if there's any stream where output is still needed */
4954  if (!need_output()) {
4955  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4956  break;
4957  }
4958 
4959  ret = transcode_step();
4960  if (ret < 0 && ret != AVERROR_EOF) {
4961  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4962  break;
4963  }
4964 
4965  /* dump report by using the output first video and audio streams */
4966  print_report(0, timer_start, cur_time);
4967  }
4968 #if HAVE_THREADS
4969  free_input_threads();
4970 #endif
4971 
4972  /* at the end of stream, we must flush the decoder buffers */
4973  for (i = 0; i < nb_input_streams; i++) {
4974  ist = input_streams[i];
4975  if (!input_files[ist->file_index]->eof_reached) {
4976  process_input_packet(ist, NULL, 0);
4977  }
4978  }
4979  flush_encoders();
4980 
4981  term_exit();
4982 
4983  /* write the trailer if needed and close file */
4984  for (i = 0; i < nb_output_files; i++) {
4985  os = output_files[i]->ctx;
4986  if (!output_files[i]->header_written) {
4987  av_log(NULL, AV_LOG_ERROR,
4988  "Nothing was written into output file %d (%s), because "
4989  "at least one of its streams received no packets.\n",
4990  i, os->url);
4991  continue;
4992  }
4993  if ((ret = av_write_trailer(os)) < 0) {
4994  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4995  if (exit_on_error)
4996  exit_program(1);
4997  }
4998  }
4999 
5000  /* dump report by using the first video and audio streams */
5001  print_report(1, timer_start, av_gettime_relative());
5002 
5003  /* close each encoder */
5004  for (i = 0; i < nb_output_streams; i++) {
5005  ost = output_streams[i];
5006  if (ost->encoding_needed) {
5007  av_freep(&ost->enc_ctx->stats_in);
5008  }
5009  total_packets_written += ost->packets_written;
5011  av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
5012  exit_program(1);
5013  }
5014  }
5015 
5016  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
5017  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
5018  exit_program(1);
5019  }
5020 
5021  /* close each decoder */
5022  for (i = 0; i < nb_input_streams; i++) {
5023  ist = input_streams[i];
5024  if (ist->decoding_needed) {
5025  avcodec_close(ist->dec_ctx);
5026  if (ist->hwaccel_uninit)
5027  ist->hwaccel_uninit(ist->dec_ctx);
5028  }
5029  }
5030 
5032 
5033  /* finished ! */
5034  ret = 0;
5035 
5036  fail:
5037 #if HAVE_THREADS
5038  free_input_threads();
5039 #endif
5040 
5041  if (output_streams) {
5042  for (i = 0; i < nb_output_streams; i++) {
5043  ost = output_streams[i];
5044  if (ost) {
5045  if (ost->logfile) {
5046  if (fclose(ost->logfile))
5047  av_log(NULL, AV_LOG_ERROR,
5048  "Error closing logfile, loss of information possible: %s\n",
5049  av_err2str(AVERROR(errno)));
5050  ost->logfile = NULL;
5051  }
5052  av_freep(&ost->forced_kf_pts);
5053  av_freep(&ost->apad);
5054  av_freep(&ost->disposition);
5055  av_dict_free(&ost->encoder_opts);
5056  av_dict_free(&ost->sws_dict);
5057  av_dict_free(&ost->swr_opts);
5058  av_dict_free(&ost->resample_opts);
5059  }
5060  }
5061  }
5062  return ret;
5063 }
5064 
5066 {
5067  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
5068 #if HAVE_GETRUSAGE
5069  struct rusage rusage;
5070 
5071  getrusage(RUSAGE_SELF, &rusage);
5072  time_stamps.user_usec =
5073  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
5074  time_stamps.sys_usec =
5075  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
5076 #elif HAVE_GETPROCESSTIMES
5077  HANDLE proc;
5078  FILETIME c, e, k, u;
5079  proc = GetCurrentProcess();
5080  GetProcessTimes(proc, &c, &e, &k, &u);
5081  time_stamps.user_usec =
5082  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
5083  time_stamps.sys_usec =
5084  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
5085 #else
5086  time_stamps.user_usec = time_stamps.sys_usec = 0;
5087 #endif
5088  return time_stamps;
5089 }
5090 
/*
 * Return the peak memory usage of this process in bytes, via whichever
 * platform API is available, or 0 when neither getrusage (with ru_maxrss)
 * nor GetProcessMemoryInfo exists.
 */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage_info;
    getrusage(RUSAGE_SELF, &usage_info);
    /* ru_maxrss is reported in kilobytes; convert to bytes */
    return 1024 * (int64_t)usage_info.ru_maxrss;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS counters;
    HANDLE self = GetCurrentProcess();
    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(self, &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    return 0;
#endif
}
5108 
/* Intentionally empty av_log-compatible callback: installing it discards all
 * log output (used when logging must be suppressed). */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
5112 
5115  longjmp_value = 0;
5116  received_sigterm = 0;
5117  received_nb_signals = 0;
5118  ffmpeg_exited = 0;
5119  copy_ts_first_pts = AV_NOPTS_VALUE;
5120 
5121  run_as_daemon = 0;
5122  nb_frames_dup = 0;
5123  dup_warning = 1000;
5124  nb_frames_drop = 0;
5125  nb_output_dumped = 0;
5126 
5127  want_sdp = 1;
5128 
5129  progress_avio = NULL;
5130 
5131  input_streams = NULL;
5132  nb_input_streams = 0;
5133  input_files = NULL;
5134  nb_input_files = 0;
5135 
5136  output_streams = NULL;
5137  nb_output_streams = 0;
5138  output_files = NULL;
5139  nb_output_files = 0;
5140 
5141  filtergraphs = NULL;
5142  nb_filtergraphs = 0;
5143 
5144  last_time = -1;
5145  keyboard_last_time = 0;
5146  first_report = 1;
5147 }
5148 
/* Register the function that receives progress statistics forwarded from
 * print_report(); pass NULL to disable forwarding. The stored pointer is the
 * file-scope report_callback. */
void set_report_callback(void (*callback)(int, float, float, int64_t, int, double, double))
{
    report_callback = callback;
}
5153 
5154 void cancel_operation(long id)
5155 {
5156  if (id == 0) {
5157  sigterm_handler(SIGINT);
5158  } else {
5159  cancelSession(id);
5160  }
5161 }
5162 
5163 __thread OptionDef *ffmpeg_options = NULL;
5164 
5165 int ffmpeg_execute(int argc, char **argv)
5166 {
5167  char _program_name[] = "ffmpeg";
5168  program_name = (char*)&_program_name;
5169  program_birth_year = 2000;
5170 
5171  #define OFFSET(x) offsetof(OptionsContext, x)
5172  OptionDef options[] = {
5173 
5174  /* main options */
5175  { "L", OPT_EXIT, { .func_arg = show_license }, "show license" },
5176  { "h", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5177  { "?", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5178  { "help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5179  { "-help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5180  { "version", OPT_EXIT, { .func_arg = show_version }, "show version" },
5181  { "buildconf", OPT_EXIT, { .func_arg = show_buildconf }, "show build configuration" },
5182  { "formats", OPT_EXIT, { .func_arg = show_formats }, "show available formats" },
5183  { "muxers", OPT_EXIT, { .func_arg = show_muxers }, "show available muxers" },
5184  { "demuxers", OPT_EXIT, { .func_arg = show_demuxers }, "show available demuxers" },
5185  { "devices", OPT_EXIT, { .func_arg = show_devices }, "show available devices" },
5186  { "codecs", OPT_EXIT, { .func_arg = show_codecs }, "show available codecs" },
5187  { "decoders", OPT_EXIT, { .func_arg = show_decoders }, "show available decoders" },
5188  { "encoders", OPT_EXIT, { .func_arg = show_encoders }, "show available encoders" },
5189  { "bsfs", OPT_EXIT, { .func_arg = show_bsfs }, "show available bit stream filters" },
5190  { "protocols", OPT_EXIT, { .func_arg = show_protocols }, "show available protocols" },
5191  { "filters", OPT_EXIT, { .func_arg = show_filters }, "show available filters" },
5192  { "pix_fmts", OPT_EXIT, { .func_arg = show_pix_fmts }, "show available pixel formats" },
5193  { "layouts", OPT_EXIT, { .func_arg = show_layouts }, "show standard channel layouts" },
5194  { "sample_fmts", OPT_EXIT, { .func_arg = show_sample_fmts }, "show available audio sample formats" },
5195  { "colors", OPT_EXIT, { .func_arg = show_colors }, "show available color names" },
5196  { "loglevel", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" },
5197  { "v", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" },
5198  { "report", 0, { .func_arg = opt_report }, "generate a report" },
5199  { "max_alloc", HAS_ARG, { .func_arg = opt_max_alloc }, "set maximum size of a single allocated block", "bytes" },
5200  { "cpuflags", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpuflags }, "force specific cpu flags", "flags" },
5201  { "hide_banner", OPT_BOOL | OPT_EXPERT, {&hide_banner}, "do not show program banner", "hide_banner" },
5202 
5203  #if CONFIG_AVDEVICE
5204  { "sources" , OPT_EXIT | HAS_ARG, { .func_arg = show_sources },
5205  "list sources of the input device", "device" },
5206  { "sinks" , OPT_EXIT | HAS_ARG, { .func_arg = show_sinks },
5207  "list sinks of the output device", "device" },
5208  #endif
5209 
5210  { "f", HAS_ARG | OPT_STRING | OPT_OFFSET |
5211  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(format) },
5212  "force format", "fmt" },
5213  { "y", OPT_BOOL, { &file_overwrite },
5214  "overwrite output files" },
5215  { "n", OPT_BOOL, { &no_file_overwrite },
5216  "never overwrite output files" },
5217  { "ignore_unknown", OPT_BOOL, { &ignore_unknown_streams },
5218  "Ignore unknown stream types" },
5219  { "copy_unknown", OPT_BOOL | OPT_EXPERT, { &copy_unknown_streams },
5220  "Copy unknown stream types" },
5221  { "c", HAS_ARG | OPT_STRING | OPT_SPEC |
5222  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
5223  "codec name", "codec" },
5224  { "codec", HAS_ARG | OPT_STRING | OPT_SPEC |
5225  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
5226  "codec name", "codec" },
5227  { "pre", HAS_ARG | OPT_STRING | OPT_SPEC |
5228  OPT_OUTPUT, { .off = OFFSET(presets) },
5229  "preset name", "preset" },
5230  { "map", HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5231  OPT_OUTPUT, { .func_arg = opt_map },
5232  "set input stream mapping",
5233  "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
5234  { "map_channel", HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_map_channel },
5235  "map an audio channel from one stream to another", "file.stream.channel[:syncfile.syncstream]" },
5236  { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC |
5237  OPT_OUTPUT, { .off = OFFSET(metadata_map) },
5238  "set metadata information of outfile from infile",
5239  "outfile[,metadata]:infile[,metadata]" },
5240  { "map_chapters", HAS_ARG | OPT_INT | OPT_EXPERT | OPT_OFFSET |
5241  OPT_OUTPUT, { .off = OFFSET(chapters_input_file) },
5242  "set chapters mapping", "input_file_index" },
5243  { "t", HAS_ARG | OPT_TIME | OPT_OFFSET |
5244  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(recording_time) },
5245  "record or transcode \"duration\" seconds of audio/video",
5246  "duration" },
5247  { "to", HAS_ARG | OPT_TIME | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(stop_time) },
5248  "record or transcode stop time", "time_stop" },
5249  { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(limit_filesize) },
5250  "set the limit file size in bytes", "limit_size" },
5251  { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET |
5252  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(start_time) },
5253  "set the start time offset", "time_off" },
5254  { "sseof", HAS_ARG | OPT_TIME | OPT_OFFSET |
5255  OPT_INPUT, { .off = OFFSET(start_time_eof) },
5256  "set the start time offset relative to EOF", "time_off" },
5257  { "seek_timestamp", HAS_ARG | OPT_INT | OPT_OFFSET |
5258  OPT_INPUT, { .off = OFFSET(seek_timestamp) },
5259  "enable/disable seeking by timestamp with -ss" },
5260  { "accurate_seek", OPT_BOOL | OPT_OFFSET | OPT_EXPERT |
5261  OPT_INPUT, { .off = OFFSET(accurate_seek) },
5262  "enable/disable accurate seeking with -ss" },
5263  { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET |
5264  OPT_EXPERT | OPT_INPUT, { .off = OFFSET(input_ts_offset) },
5265  "set the input ts offset", "time_off" },
5266  { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC |
5267  OPT_EXPERT | OPT_INPUT, { .off = OFFSET(ts_scale) },
5268  "set the input ts scale", "scale" },
5269  { "timestamp", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_recording_timestamp },
5270  "set the recording timestamp ('now' to set the current time)", "time" },
5271  { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
5272  "add metadata", "string=string" },
5273  { "program", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(program) },
5274  "add program with specified streams", "title=string:st=number..." },
5275  { "dframes", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
5276  OPT_OUTPUT, { .func_arg = opt_data_frames },
5277  "set the number of data frames to output", "number" },
5278  { "benchmark", OPT_BOOL | OPT_EXPERT, { &do_benchmark },
5279  "add timings for benchmarking" },
5280  { "benchmark_all", OPT_BOOL | OPT_EXPERT, { &do_benchmark_all },
5281  "add timings for each task" },
5282  { "progress", HAS_ARG | OPT_EXPERT, { .func_arg = opt_progress },
5283  "write program-readable progress information", "url" },
5284  { "stdin", OPT_BOOL | OPT_EXPERT, { &stdin_interaction },
5285  "enable or disable interaction on standard input" },
5286  { "timelimit", HAS_ARG | OPT_EXPERT, { .func_arg = opt_timelimit },
5287  "set max runtime in seconds in CPU user time", "limit" },
5288  { "dump", OPT_BOOL | OPT_EXPERT, { &do_pkt_dump },
5289  "dump each input packet" },
5290  { "hex", OPT_BOOL | OPT_EXPERT, { &do_hex_dump },
5291  "when dumping packets, also dump the payload" },
5292  { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
5293  OPT_INPUT, { .off = OFFSET(rate_emu) },
5294  "read input at native frame rate", "" },
5295  { "target", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_target },
5296  "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\" or \"dv50\" "
5297  "with optional prefixes \"pal-\", \"ntsc-\" or \"film-\")", "type" },
5298  { "vsync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vsync },
5299  "video sync method", "" },
5300  { "frame_drop_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &frame_drop_threshold },
5301  "frame drop threshold", "" },
5302  { "async", HAS_ARG | OPT_INT | OPT_EXPERT, { &audio_sync_method },
5303  "audio sync method", "" },
5304  { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &audio_drift_threshold },
5305  "audio drift threshold", "threshold" },
5306  { "copyts", OPT_BOOL | OPT_EXPERT, { &copy_ts },
5307  "copy timestamps" },
5308  { "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero },
5309  "shift input timestamps to start at 0 when using copyts" },
5310  { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb },
5311  "copy input stream time base when stream copying", "mode" },
5312  { "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero },
5313  "shift input timestamps to start at 0 when using copyts" },
5314  { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb },
5315  "copy input stream time base when stream copying", "mode" },
5316  { "shortest", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
5317  OPT_OUTPUT, { .off = OFFSET(shortest) },
5318  "finish encoding within shortest input" },
5319  { "bitexact", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
5320  OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(bitexact) },
5321  "bitexact mode" },
5322  { "apad", OPT_STRING | HAS_ARG | OPT_SPEC |
5323  OPT_OUTPUT, { .off = OFFSET(apad) },
5324  "audio pad", "" },
5325  { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &dts_delta_threshold },
5326  "timestamp discontinuity delta threshold", "threshold" },
5327  { "dts_error_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &dts_error_threshold },
5328  "timestamp error delta threshold", "threshold" },
5329  { "xerror", OPT_BOOL | OPT_EXPERT, { &exit_on_error },
5330  "exit on error", "error" },
5331  { "abort_on", HAS_ARG | OPT_EXPERT, { .func_arg = opt_abort_on },
5332  "abort on the specified condition flags", "flags" },
5333  { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC |
5334  OPT_OUTPUT, { .off = OFFSET(copy_initial_nonkeyframes) },
5335  "copy initial non-keyframes" },
5336  { "copypriorss", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(copy_prior_start) },
5337  "copy or discard frames before start time" },
5338  { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(max_frames) },
5339  "set the number of frames to output", "number" },
5340  { "tag", OPT_STRING | HAS_ARG | OPT_SPEC |
5341  OPT_EXPERT | OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(codec_tags) },
5342  "force codec tag/fourcc", "fourcc/tag" },
5343  { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE |
5344  OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(qscale) },
5345  "use fixed quality scale (VBR)", "q" },
5346  { "qscale", HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5347  OPT_OUTPUT, { .func_arg = opt_qscale },
5348  "use fixed quality scale (VBR)", "q" },
5349  { "profile", HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_profile },
5350  "set profile", "profile" },
5351  { "filter", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filters) },
5352  "set stream filtergraph", "filter_graph" },
5353  { "filter_threads", HAS_ARG | OPT_INT, { &filter_nbthreads },
5354  "number of non-complex filter threads" },
5355  { "filter_script", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filter_scripts) },
5356  "read stream filtergraph description from a file", "filename" },
5357  { "reinit_filter", HAS_ARG | OPT_INT | OPT_SPEC | OPT_INPUT, { .off = OFFSET(reinit_filters) },
5358  "reinit filtergraph on input parameter changes", "" },
5359  { "filter_complex", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex },
5360  "create a complex filtergraph", "graph_description" },
5361  { "filter_complex_threads", HAS_ARG | OPT_INT, { &filter_complex_nbthreads },
5362  "number of threads for -filter_complex" },
5363  { "lavfi", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex },
5364  "create a complex filtergraph", "graph_description" },
5365  { "filter_complex_script", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex_script },
5366  "read complex filtergraph description from a file", "filename" },
5367  { "auto_conversion_filters", OPT_BOOL | OPT_EXPERT, { &auto_conversion_filters },
5368  "enable automatic conversion filters globally" },
5369  { "stats", OPT_BOOL, { &print_stats },
5370  "print progress report during encoding", },
5371  { "stats_period", HAS_ARG | OPT_EXPERT, { .func_arg = opt_stats_period },
5372  "set the period at which ffmpeg updates stats and -progress output", "time" },
5373  { "attach", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
5374  OPT_OUTPUT, { .func_arg = opt_attach },
5375  "add an attachment to the output file", "filename" },
5376  { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC |
5377  OPT_EXPERT | OPT_INPUT, { .off = OFFSET(dump_attachment) },
5378  "extract an attachment into a file", "filename" },
5379  { "stream_loop", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_INPUT |
5380  OPT_OFFSET, { .off = OFFSET(loop) }, "set number of times input stream shall be looped", "loop count" },
5381  { "debug_ts", OPT_BOOL | OPT_EXPERT, { &debug_ts },
5382  "print timestamp debugging info" },
5383  { "max_error_rate", HAS_ARG | OPT_FLOAT, { &max_error_rate },
5384  "ratio of errors (0.0: no errors, 1.0: 100% errors) above which ffmpeg returns an error instead of success.", "maximum error rate" },
5385  { "discard", OPT_STRING | HAS_ARG | OPT_SPEC |
5386  OPT_INPUT, { .off = OFFSET(discard) },
5387  "discard", "" },
5388  { "disposition", OPT_STRING | HAS_ARG | OPT_SPEC |
5389  OPT_OUTPUT, { .off = OFFSET(disposition) },
5390  "disposition", "" },
5391  { "thread_queue_size", HAS_ARG | OPT_INT | OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
5392  { .off = OFFSET(thread_queue_size) },
5393  "set the maximum number of queued packets from the demuxer" },
5394  { "find_stream_info", OPT_BOOL | OPT_PERFILE | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
5395  "read and decode the streams to fill missing information with heuristics" },
5396 
5397  /* video options */
5398  { "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },
5399  "set the number of video frames to output", "number" },
5400  { "r", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
5401  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_rates) },
5402  "set frame rate (Hz value, fraction or abbreviation)", "rate" },
5404  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_sizes) },
5405  "set frame size (WxH or abbreviation)", "size" },
5406  { "aspect", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
5407  OPT_OUTPUT, { .off = OFFSET(frame_aspect_ratios) },
5408  "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
5409  { "pix_fmt", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5410  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_pix_fmts) },
5411  "set pixel format", "format" },
5412  { "bits_per_raw_sample", OPT_VIDEO | OPT_INT | HAS_ARG, { &frame_bits_per_raw_sample },
5413  "set the number of bits per raw sample", "number" },
5414  { "intra", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &intra_only },
5415  "deprecated use -g 1" },
5416  { "vn", OPT_VIDEO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(video_disable) },
5417  "disable video" },
5418  { "rc_override", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5419  OPT_OUTPUT, { .off = OFFSET(rc_overrides) },
5420  "rate control override for specific intervals", "override" },
5421  { "vcodec", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_INPUT |
5422  OPT_OUTPUT, { .func_arg = opt_video_codec },
5423  "force video codec ('copy' to copy stream)", "codec" },
5424  { "sameq", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_sameq },
5425  "Removed" },
5426  { "same_quant", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_sameq },
5427  "Removed" },
5428  { "timecode", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_timecode },
5429  "set initial TimeCode value.", "hh:mm:ss[:;.]ff" },
5430  { "pass", OPT_VIDEO | HAS_ARG | OPT_SPEC | OPT_INT | OPT_OUTPUT, { .off = OFFSET(pass) },
5431  "select the pass number (1 to 3)", "n" },
5432  { "passlogfile", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC |
5433  OPT_OUTPUT, { .off = OFFSET(passlogfiles) },
5434  "select two pass log file name prefix", "prefix" },
5435  { "deinterlace", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_deinterlace },
5436  "this option is deprecated, use the yadif filter instead" },
5437  { "psnr", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_psnr },
5438  "calculate PSNR of compressed frames" },
5439  { "vstats", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_vstats },
5440  "dump video coding statistics to file" },
5441  { "vstats_file", OPT_VIDEO | HAS_ARG | OPT_EXPERT , { .func_arg = opt_vstats_file },
5442  "dump video coding statistics to file", "file" },
5443  { "vstats_version", OPT_VIDEO | OPT_INT | HAS_ARG | OPT_EXPERT , { &vstats_version },
5444  "Version of the vstats format to use."},
5445  { "vf", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_filters },
5446  "set video filters", "filter_graph" },
5447  { "intra_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5448  OPT_OUTPUT, { .off = OFFSET(intra_matrices) },
5449  "specify intra matrix coeffs", "matrix" },
5450  { "inter_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5451  OPT_OUTPUT, { .off = OFFSET(inter_matrices) },
5452  "specify inter matrix coeffs", "matrix" },
5453  { "chroma_intra_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5454  OPT_OUTPUT, { .off = OFFSET(chroma_intra_matrices) },
5455  "specify intra matrix coeffs", "matrix" },
5456  { "top", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_INT| OPT_SPEC |
5457  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(top_field_first) },
5458  "top=1/bottom=0/auto=-1 field first", "" },
5459  { "vtag", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5460  OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_old2new },
5461  "force video tag/fourcc", "fourcc/tag" },
5462  { "qphist", OPT_VIDEO | OPT_BOOL | OPT_EXPERT , { &qp_hist },
5463  "show QP histogram" },
5464  { "force_fps", OPT_VIDEO | OPT_BOOL | OPT_EXPERT | OPT_SPEC |
5465  OPT_OUTPUT, { .off = OFFSET(force_fps) },
5466  "force the selected framerate, disable the best supported framerate selection" },
5467  { "streamid", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5468  OPT_OUTPUT, { .func_arg = opt_streamid },
5469  "set the value of an outfile streamid", "streamIndex:value" },
5470  { "force_key_frames", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5471  OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(forced_key_frames) },
5472  "force key frames at specified timestamps", "timestamps" },
5473  { "ab", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
5474  "audio bitrate (please use -b:a)", "bitrate" },
5475  { "b", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
5476  "video bitrate (please use -b:v)", "bitrate" },
5477  { "hwaccel", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5478  OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccels) },
5479  "use HW accelerated decoding", "hwaccel name" },
5480  { "hwaccel_device", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5481  OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_devices) },
5482  "select a device for HW acceleration", "devicename" },
5483  { "hwaccel_output_format", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5484  OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_output_formats) },
5485  "select output format used with HW accelerated decoding", "format" },
5486  #if CONFIG_VIDEOTOOLBOX
5487  { "videotoolbox_pixfmt", HAS_ARG | OPT_STRING | OPT_EXPERT, { &videotoolbox_pixfmt}, "" },
5488  #endif
5489  { "hwaccels", OPT_EXIT, { .func_arg = show_hwaccels },
5490  "show available HW acceleration methods" },
5491  { "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
5492  OPT_EXPERT | OPT_INPUT, { .off = OFFSET(autorotate) },
5493  "automatically insert correct rotate filters" },
5494  { "autoscale", HAS_ARG | OPT_BOOL | OPT_SPEC |
5495  OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(autoscale) },
5496  "automatically insert a scale filter at the end of the filter graph" },
5497 
5498  /* audio options */
5499  { "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },
5500  "set the number of audio frames to output", "number" },
5501  { "aq", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_qscale },
5502  "set audio quality (codec-specific)", "quality", },
5503  { "ar", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
5504  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_sample_rate) },
5505  "set audio sampling rate (in Hz)", "rate" },
5506  { "ac", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
5507  OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_channels) },
5508  "set number of audio channels", "channels" },
5509  { "an", OPT_AUDIO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(audio_disable) },
5510  "disable audio" },
5511  { "acodec", OPT_AUDIO | HAS_ARG | OPT_PERFILE |
5512  OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_audio_codec },
5513  "force audio codec ('copy' to copy stream)", "codec" },
5514  { "atag", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5515  OPT_OUTPUT, { .func_arg = opt_old2new },
5516  "force audio tag/fourcc", "fourcc/tag" },
5517  { "vol", OPT_AUDIO | HAS_ARG | OPT_INT, { &audio_volume },
5518  "change audio volume (256=normal)" , "volume" },
5519  { "sample_fmt", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_SPEC |
5520  OPT_STRING | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(sample_fmts) },
5521  "set sample format", "format" },
5522  { "channel_layout", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5523  OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_channel_layout },
5524  "set channel layout", "layout" },
5525  { "af", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_filters },
5526  "set audio filters", "filter_graph" },
5527  { "guess_layout_max", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_INPUT, { .off = OFFSET(guess_layout_max) },
5528  "set the maximum number of channels to try to guess the channel layout" },
5529 
5530  /* subtitle options */
5531  { "sn", OPT_SUBTITLE | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(subtitle_disable) },
5532  "disable subtitle" },
5533  { "scodec", OPT_SUBTITLE | HAS_ARG | OPT_PERFILE | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_subtitle_codec },
5534  "force subtitle codec ('copy' to copy stream)", "codec" },
5535  { "stag", OPT_SUBTITLE | HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new }
5536  , "force subtitle tag/fourcc", "fourcc/tag" },
5537  { "fix_sub_duration", OPT_BOOL | OPT_EXPERT | OPT_SUBTITLE | OPT_SPEC | OPT_INPUT, { .off = OFFSET(fix_sub_duration) },
5538  "fix subtitles duration" },
5539  { "canvas_size", OPT_SUBTITLE | HAS_ARG | OPT_STRING | OPT_SPEC | OPT_INPUT, { .off = OFFSET(canvas_sizes) },
5540  "set canvas size (WxH or abbreviation)", "size" },
5541 
5542  /* grab options */
5543  { "vc", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_video_channel },
5544  "deprecated, use -channel", "channel" },
5545  { "tvstd", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_video_standard },
5546  "deprecated, use -standard", "standard" },
5547  { "isync", OPT_BOOL | OPT_EXPERT, { &input_sync }, "this option is deprecated and does nothing", "" },
5548 
5549  /* muxer options */
5550  { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_max_delay) },
5551  "set the maximum demux-decode delay", "seconds" },
5552  { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_preload) },
5553  "set the initial demux-decode delay", "seconds" },
5554  { "sdp_file", HAS_ARG | OPT_EXPERT | OPT_OUTPUT, { .func_arg = opt_sdp_file },
5555  "specify a file in which to print sdp information", "file" },
5556 
5557  { "time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(time_bases) },
5558  "set the desired time base hint for output stream (1:24, 1:48000 or 0.04166, 2.0833e-5)", "ratio" },
5559  { "enc_time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(enc_time_bases) },
5560  "set the desired time base for the encoder (1:24, 1:48000 or 0.04166, 2.0833e-5). "
5561  "two special values are defined - "
5562  "0 = use frame rate (video) or sample rate (audio),"
5563  "-1 = match source time base", "ratio" },
5564 
5565  { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(bitstream_filters) },
5566  "A comma-separated list of bitstream filters", "bitstream_filters" },
5567  { "absf", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new },
5568  "deprecated", "audio bitstream_filters" },
5569  { "vbsf", OPT_VIDEO | HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new },
5570  "deprecated", "video bitstream_filters" },
5571 
5572  { "apre", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5573  "set the audio options to the indicated preset", "preset" },
5574  { "vpre", OPT_VIDEO | HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5575  "set the video options to the indicated preset", "preset" },
5576  { "spre", HAS_ARG | OPT_SUBTITLE | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5577  "set the subtitle options to the indicated preset", "preset" },
5578  { "fpre", HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5579  "set options from indicated preset file", "filename" },
5580 
5581  { "max_muxing_queue_size", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(max_muxing_queue_size) },
5582  "maximum number of packets that can be buffered while waiting for all streams to initialize", "packets" },
5583  { "muxing_queue_data_threshold", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(muxing_queue_data_threshold) },
5584  "set the threshold after which max_muxing_queue_size is taken into account", "bytes" },
5585 
5586  /* data codec support */
5587  { "dcodec", HAS_ARG | OPT_DATA | OPT_PERFILE | OPT_EXPERT | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_data_codec },
5588  "force data codec ('copy' to copy stream)", "codec" },
5589  { "dn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(data_disable) },
5590  "disable data" },
5591 
5592  #if CONFIG_VAAPI
5593  { "vaapi_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vaapi_device },
5594  "set VAAPI hardware device (DRM path or X11 display name)", "device" },
5595  #endif
5596 
5597  #if CONFIG_QSV
5598  { "qsv_device", HAS_ARG | OPT_STRING | OPT_EXPERT, { &qsv_device },
5599  "set QSV hardware device (DirectX adapter index, DRM path or X11 display name)", "device"},
5600  #endif
5601 
5602  { "init_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_init_hw_device },
5603  "initialise hardware device", "args" },
5604  { "filter_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_hw_device },
5605  "set hardware device used when filtering", "device" },
5606 
5607  { NULL, },
5608  };
5609 
5610  ffmpeg_options = options;
5611 
5612  int i, ret;
5614 
5615  int savedCode = setjmp(ex_buf__);
5616  if (savedCode == 0) {
5617 
5619 
5620  init_dynload();
5621 
5623 
5624  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
5625 
5626  av_log_set_flags(AV_LOG_SKIP_REPEATED);
5627  parse_loglevel(argc, argv, options);
5628 
5629  if(argc>1 && !strcmp(argv[1], "-d")){
5630  run_as_daemon=1;
5631  av_log_set_callback(log_callback_null);
5632  argc--;
5633  argv++;
5634  }
5635 
5636  #if CONFIG_AVDEVICE
5637  avdevice_register_all();
5638  #endif
5639  avformat_network_init();
5640 
5641  show_banner(argc, argv, options);
5642 
5643  /* parse options and open all input/output files */
5644  ret = ffmpeg_parse_options(argc, argv);
5645  if (ret < 0)
5646  exit_program(1);
5647 
5648  if (nb_output_files <= 0 && nb_input_files == 0) {
5649  show_usage();
5650  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
5651  exit_program(1);
5652  }
5653 
5654  /* file converter / grab */
5655  if (nb_output_files <= 0) {
5656  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
5657  exit_program(1);
5658  }
5659 
5660  for (i = 0; i < nb_output_files; i++) {
5661  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
5662  want_sdp = 0;
5663  }
5664 
5666  if (transcode() < 0)
5667  exit_program(1);
5668  if (do_benchmark) {
5669  int64_t utime, stime, rtime;
5671  utime = current_time.user_usec - ti.user_usec;
5672  stime = current_time.sys_usec - ti.sys_usec;
5673  rtime = current_time.real_usec - ti.real_usec;
5674  av_log(NULL, AV_LOG_INFO,
5675  "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
5676  utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
5677  }
5678  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
5681  exit_program(69);
5682 
5684 
5685  } else {
5687  }
5688 
5689  return main_ffmpeg_return_code;
5690 }
__thread jmp_buf ex_buf__
void exit_program(int ret)
int show_decoders(void *optctx, const char *opt, const char *arg)
int opt_loglevel(void *optctx, const char *opt, const char *arg)
int opt_cpuflags(void *optctx, const char *opt, const char *arg)
void init_dynload(void)
int show_help(void *optctx, const char *opt, const char *arg)
void print_error(const char *filename, int err)
int show_filters(void *optctx, const char *opt, const char *arg)
int show_sample_fmts(void *optctx, const char *opt, const char *arg)
int show_muxers(void *optctx, const char *opt, const char *arg)
int show_bsfs(void *optctx, const char *opt, const char *arg)
__thread char * program_name
int show_layouts(void *optctx, const char *opt, const char *arg)
int show_encoders(void *optctx, const char *opt, const char *arg)
int show_version(void *optctx, const char *opt, const char *arg)
void parse_loglevel(int argc, char **argv, const OptionDef *options)
__thread int program_birth_year
void show_banner(int argc, char **argv, const OptionDef *options)
int opt_timelimit(void *optctx, const char *opt, const char *arg)
int show_license(void *optctx, const char *opt, const char *arg)
int show_codecs(void *optctx, const char *opt, const char *arg)
int show_buildconf(void *optctx, const char *opt, const char *arg)
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
void register_exit(void(*cb)(int ret))
int show_devices(void *optctx, const char *opt, const char *arg)
void uninit_opts(void)
int show_formats(void *optctx, const char *opt, const char *arg)
__thread int hide_banner
int show_protocols(void *optctx, const char *opt, const char *arg)
int opt_max_alloc(void *optctx, const char *opt, const char *arg)
int opt_report(void *optctx, const char *opt, const char *arg)
int show_colors(void *optctx, const char *opt, const char *arg)
int show_pix_fmts(void *optctx, const char *opt, const char *arg)
int show_demuxers(void *optctx, const char *opt, const char *arg)
#define OPT_VIDEO
#define OPT_SPEC
#define OPT_BOOL
#define media_type_string
#define OPT_INT64
#define OPT_PERFILE
#define OPT_INT
#define OPT_FLOAT
#define AV_LOG_STDERR
#define OPT_INPUT
#define OPT_DOUBLE
#define OPT_STRING
__thread int find_stream_info
#define OPT_AUDIO
#define OPT_DATA
#define OPT_SUBTITLE
#define OPT_EXPERT
#define OPT_EXIT
#define OPT_OUTPUT
#define OPT_TIME
#define OPT_OFFSET
#define HAS_ARG
int opt_channel_layout(void *optctx, const char *opt, const char *arg)
__thread unsigned dup_warning
int opt_sdp_file(void *optctx, const char *opt, const char *arg)
static int transcode(void)
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
int opt_timecode(void *optctx, const char *opt, const char *arg)
__thread OptionDef * ffmpeg_options
int opt_vstats_file(void *optctx, const char *opt, const char *arg)
__thread InputStream ** input_streams
static void set_tty_echo(int on)
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
__thread const AVIOInterruptCB int_cb
__thread int run_as_daemon
static int check_keyboard_interaction(int64_t cur_time)
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
int opt_data_codec(void *optctx, const char *opt, const char *arg)
int opt_streamid(void *optctx, const char *opt, const char *arg)
__thread int nb_input_streams
static int need_output(void)
void term_exit(void)
__thread volatile long sessionId
Definition: ffmpegkit.c:105
static volatile int received_sigterm
const char *const forced_keyframes_const_names[]
int opt_qscale(void *optctx, const char *opt, const char *arg)
int opt_sameq(void *optctx, const char *opt, const char *arg)
__thread OutputStream ** output_streams
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
__thread OutputFile ** output_files
int opt_filter_complex_script(void *optctx, const char *opt, const char *arg)
static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_time)
int opt_filter_complex(void *optctx, const char *opt, const char *arg)
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
int opt_vsync(void *optctx, const char *opt, const char *arg)
static int init_input_stream(int ist_index, char *error, int error_len)
__thread int nb_output_streams
static void sub2video_push_ref(InputStream *ist, int64_t pts)
int guess_input_channel_layout(InputStream *ist)
__thread volatile int longjmp_value
static void print_sdp(void)
__thread int nb_frames_dup
static int reap_filters(int flush)
static int check_recording_time(OutputStream *ost)
static void abort_codec_experimental(AVCodec *c, int encoder)
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture)
__thread BenchmarkTimeStamps current_time
__thread int nb_input_files
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
static void print_final_stats(int64_t total_size)
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
__thread int first_report
__thread int nb_output_files
static double psnr(double d)
static int init_output_bsfs(OutputStream *ost)
volatile int handleSIGINT
Definition: ffmpegkit.c:99
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
int opt_subtitle_codec(void *optctx, const char *opt, const char *arg)
volatile int handleSIGTERM
Definition: ffmpegkit.c:100
int opt_video_standard(void *optctx, const char *opt, const char *arg)
void set_report_callback(void(*callback)(int, float, float, int64_t, int, double, double))
int opt_profile(void *optctx, const char *opt, const char *arg)
struct BenchmarkTimeStamps BenchmarkTimeStamps
static int64_t getmaxrss(void)
int opt_abort_on(void *optctx, const char *opt, const char *arg)
static void reset_eagain(void)
__thread int copy_unknown_streams
static void finish_output_stream(OutputStream *ost)
int opt_video_codec(void *optctx, const char *opt, const char *arg)
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
int opt_data_frames(void *optctx, const char *opt, const char *arg)
static OutputStream * choose_output(void)
int opt_video_filters(void *optctx, const char *opt, const char *arg)
static int compare_int64(const void *a, const void *b)
__thread int input_sync
__thread volatile int ffmpeg_exited
int opt_attach(void *optctx, const char *opt, const char *arg)
int opt_filter_hw_device(void *optctx, const char *opt, const char *arg)
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
__thread atomic_int transcode_init_done
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
static void check_decode_result(InputStream *ist, int *got_output, int ret)
__thread int64_t keyboard_last_time
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
int opt_audio_frames(void *optctx, const char *opt, const char *arg)
static int process_input(int file_index)
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
__thread int no_file_overwrite
int opt_target(void *optctx, const char *opt, const char *arg)
__thread int qp_histogram[52]
static int check_output_constraints(InputStream *ist, OutputStream *ost)
__thread int file_overwrite
int opt_map(void *optctx, const char *opt, const char *arg)
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
void cancel_operation(long id)
static void set_encoder_id(OutputFile *of, OutputStream *ost)
__thread AVIOContext * progress_avio
__thread int64_t copy_ts_first_pts
__thread InputFile ** input_files
static int read_key(void)
static int check_init_output_file(OutputFile *of, int file_index)
static void close_output_stream(OutputStream *ost)
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
int opt_vstats(void *optctx, const char *opt, const char *arg)
int opt_video_channel(void *optctx, const char *opt, const char *arg)
__thread FilterGraph ** filtergraphs
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
static int got_eagain(void)
int opt_video_frames(void *optctx, const char *opt, const char *arg)
static int get_input_packet(InputFile *f, AVPacket *pkt)
__thread unsigned nb_output_dumped
static int send_filter_eof(InputStream *ist)
int decode_interrupt_cb(void *ctx)
int opt_audio_filters(void *optctx, const char *opt, const char *arg)
static void term_exit_sigsafe(void)
static void sub2video_flush(InputStream *ist)
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
void remove_avoptions(AVDictionary **a, AVDictionary *b)
static int transcode_init(void)
void cancelSession(long id)
Definition: ffmpegkit.c:407
static volatile int received_nb_signals
int opt_audio_qscale(void *optctx, const char *opt, const char *arg)
int cancelRequested(long id)
Definition: ffmpegkit.c:417
static FILE * vstats_file
void(* report_callback)(int, float, float, int64_t, int, double, double)
static void report_new_stream(int input_index, AVPacket *pkt)
__thread int64_t decode_error_stat[2]
__thread volatile int main_ffmpeg_return_code
int show_hwaccels(void *optctx, const char *opt, const char *arg)
static void update_benchmark(const char *fmt,...)
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
static void ffmpeg_cleanup(int ret)
int opt_preset(void *optctx, const char *opt, const char *arg)
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
static int transcode_step(void)
static int ifilter_has_all_input_formats(FilterGraph *fg)
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
int opt_old2new(void *optctx, const char *opt, const char *arg)
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
__thread int nb_filtergraphs
static int init_output_stream_streamcopy(OutputStream *ost)
void term_init(void)
int opt_stats_period(void *optctx, const char *opt, const char *arg)
__thread int64_t last_time
volatile int handleSIGPIPE
Definition: ffmpegkit.c:102
#define OFFSET(x)
int opt_bitrate(void *optctx, const char *opt, const char *arg)
volatile int handleSIGXCPU
Definition: ffmpegkit.c:101
static void do_video_stats(OutputStream *ost, int frame_size)
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
__thread uint8_t * subtitle_out
static int sub2video_get_blank_frame(InputStream *ist)
static void flush_encoders(void)
__thread int do_psnr
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
static InputStream * get_input_stream(OutputStream *ost)
int opt_map_channel(void *optctx, const char *opt, const char *arg)
int opt_progress(void *optctx, const char *opt, const char *arg)
int opt_audio_codec(void *optctx, const char *opt, const char *arg)
int opt_recording_timestamp(void *optctx, const char *opt, const char *arg)
void assert_avoptions(AVDictionary *m)
__thread int want_sdp
void ffmpeg_var_cleanup()
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
int ffmpeg_execute(int argc, char **argv)
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
__thread int ignore_unknown_streams
static void sigterm_handler(int sig)
__thread int intra_only
volatile int handleSIGQUIT
Definition: ffmpegkit.c:98
int opt_init_hw_device(void *optctx, const char *opt, const char *arg)
__thread int nb_frames_drop
__thread float dts_delta_threshold
int hw_device_setup_for_encode(OutputStream *ost)
__thread int copy_tb
@ HWACCEL_GENERIC
@ HWACCEL_AUTO
__thread int frame_bits_per_raw_sample
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
#define VSYNC_DROP
__thread int64_t stats_period
__thread char * sdp_filename
__thread int print_stats
__thread int video_sync_method
__thread int filter_complex_nbthreads
__thread int abort_on_flags
__thread int audio_volume
__thread float max_error_rate
__thread int copy_ts
__thread int stdin_interaction
__thread float dts_error_threshold
int hwaccel_decode_init(AVCodecContext *avctx)
OSTFinished
@ ENCODER_FINISHED
@ MUXER_FINISHED
#define VSYNC_CFR
__thread int filter_nbthreads
void show_usage(void)
#define DECODING_FOR_FILTER
__thread int do_benchmark
__thread float frame_drop_threshold
__thread int vstats_version
__thread char * vstats_filename
__thread int do_deinterlace
int hw_device_setup_for_decode(InputStream *ist)
#define VSYNC_AUTO
void hw_device_free_all(void)
__thread int audio_sync_method
__thread float audio_drift_threshold
__thread int do_benchmark_all
__thread int start_at_zero
@ FKF_PREV_FORCED_N
@ FKF_T
@ FKF_PREV_FORCED_T
@ FKF_N_FORCED
@ FKF_N
__thread int exit_on_error
int ffmpeg_parse_options(int argc, char **argv)
#define ABORT_ON_FLAG_EMPTY_OUTPUT
#define DECODING_FOR_OST
__thread int qp_hist
#define VSYNC_VSCFR
int filtergraph_is_simple(FilterGraph *fg)
#define VSYNC_PASSTHROUGH
int configure_filtergraph(FilterGraph *fg)
__thread int do_hex_dump
__thread int do_pkt_dump
const HWAccel hwaccels[]
#define VSYNC_VFR
void dump_attachment(AVStream *st, const char *filename)
__thread int debug_ts
__thread char * videotoolbox_pixfmt
__thread int auto_conversion_filters
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
#define avio_open2
Definition: saf_wrapper.h:40
#define avio_closep
Definition: saf_wrapper.h:30
#define avformat_close_input
Definition: saf_wrapper.h:33
OutputFilter ** outputs
const char * graph_desc
AVFilterGraph * graph
InputFilter ** inputs
enum HWAccelID id
int(* init)(AVCodecContext *s)
const char * name
int64_t ts_offset
int64_t duration
AVFormatContext * ctx
int64_t input_ts_offset
int64_t recording_time
AVRational time_base
int nb_streams_warn
int64_t last_ts
int64_t start_time
AVBufferRef * hw_frames_ctx
uint8_t * name
struct InputStream * ist
AVFilterContext * filter
enum AVMediaType type
AVFifoBuffer * frame_queue
uint64_t channel_layout
struct FilterGraph * graph
AVRational sample_aspect_ratio
unsigned int initialize
marks if sub2video_update should force an initialization
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
enum AVPixelFormat hwaccel_pix_fmt
AVFrame * decoded_frame
int64_t * dts_buffer
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
int64_t cfr_next_pts
void(* hwaccel_uninit)(AVCodecContext *s)
AVCodecContext * dec_ctx
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
AVBufferRef * hw_frames_ctx
AVCodec * dec
struct InputStream::@2 prev_sub
enum HWAccelID hwaccel_id
uint64_t data_size
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
int64_t filter_in_rescale_delta_last
int64_t next_dts
int wrap_correction_done
int64_t max_pts
enum AVPixelFormat hwaccel_retrieved_pix_fmt
AVFrame * filter_frame
uint64_t samples_decoded
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
uint64_t frames_decoded
struct InputStream::sub2video sub2video
AVStream * st
InputFilter ** filters
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
uint64_t nb_packets
AVSubtitle subtitle
char * hwaccel_device
AVDictionary * decoder_opts
int64_t min_pts
enum AVHWDeviceType hwaccel_device_type
int64_t nb_samples
AVRational framerate
uint64_t limit_filesize
AVFormatContext * ctx
int64_t start_time
start time in microseconds == AV_TIME_BASE units
AVDictionary * opts
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
AVFilterInOut * out_tmp
struct OutputStream * ost
uint64_t * channel_layouts
AVFilterContext * filter
uint8_t * name
struct FilterGraph * graph
int max_muxing_queue_size
AVDictionary * swr_opts
int copy_initial_nonkeyframes
int64_t last_mux_dts
AVRational mux_timebase
double forced_keyframes_expr_const_values[FKF_NB]
OSTFinished finished
int * audio_channels_map
AVRational frame_aspect_ratio
double rotate_override_value
AVFrame * last_frame
int audio_channels_mapped
int64_t sync_opts
int64_t * forced_kf_pts
int64_t error[4]
uint64_t packets_written
uint64_t frames_encoded
int64_t max_frames
size_t muxing_queue_data_threshold
AVDictionary * resample_opts
AVRational max_frame_rate
AVRational enc_timebase
AVFifoBuffer * muxing_queue
AVCodecParameters * ref_par
char * forced_keyframes
AVFrame * filtered_frame
const char * attachment_filename
AVRational frame_rate
AVCodecContext * enc_ctx
struct InputStream * sync_ist
AVDictionary * encoder_opts
uint64_t data_size
AVStream * st
char * filters
filtergraph associated to the -filter option
int64_t forced_kf_ref_pts
uint64_t samples_encoded
char * filters_script
filtergraph script associated to the -filter_script option
AVBSFContext * bsf_ctx
int64_t first_pts
AVCodec * enc
AVDictionary * sws_dict
OutputFilter * filter
char * disposition
AVExpr * forced_keyframes_pexpr
size_t muxing_queue_data_size
int last_nb0_frames[3]
char * logfile_prefix