FFmpegKit Android API 4.5
fftools_ffmpeg.c
Go to the documentation of this file.
1/*
2 * Copyright (c) 2000-2003 Fabrice Bellard
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
26/*
27 * CHANGES 01.2021
28 * - NDK r22 incompatibility issues regarding INT64_MAX fixed
29 *
30 * CHANGES 06.2020
31 * - ignoring signals implemented
32 * - cancel_operation() method signature updated with id
33 * - cancel by execution id implemented
34 *
35 * CHANGES 01.2020
36 * - ffprobe support changes
37 *
38 * CHANGES 12.2019
39 * - concurrent execution support
40 *
41 * CHANGES 08.2018
42 * --------------------------------------------------------
43 * - fftools_ prefix added to file name and parent headers
44 * - forward_report() method, report_callback function pointer and set_report_callback() setter
45 * method added to forward stats.
46 * - forward_report() call added from print_report()
47 * - cancel_operation() method added to trigger sigterm_handler
48 * - (!received_sigterm) validation added inside ifilter_send_eof() to complete cancellation
49 *
50 * CHANGES 07.2018
51 * --------------------------------------------------------
52 * - main() function renamed as ffmpeg_execute()
53 * - exit_program() implemented with setjmp
54 * - extern longjmp_value added to access exit code stored in exit_program()
55 * - ffmpeg_var_cleanup() method added
56 */
57
58#include "config.h"
59#include <ctype.h>
60#include <string.h>
61#include <math.h>
62#include <stdlib.h>
63#include <errno.h>
64#include <limits.h>
65#include <stdatomic.h>
66#include <stdint.h>
67
68#include "ffmpegkit_exception.h"
69
70#if HAVE_IO_H
71#include <io.h>
72#endif
73#if HAVE_UNISTD_H
74#include <unistd.h>
75#endif
76
77#include "libavformat/avformat.h"
78#include "libavdevice/avdevice.h"
79#include "libswresample/swresample.h"
80#include "libavutil/opt.h"
81#include "libavutil/channel_layout.h"
82#include "libavutil/parseutils.h"
83#include "libavutil/samplefmt.h"
84#include "libavutil/fifo.h"
85#include "libavutil/hwcontext.h"
86#include "libavutil/internal.h"
87#include "libavutil/intreadwrite.h"
88#include "libavutil/dict.h"
89#include "libavutil/display.h"
90#include "libavutil/mathematics.h"
91#include "libavutil/pixdesc.h"
92#include "libavutil/avstring.h"
93#include "libavutil/libm.h"
94#include "libavutil/imgutils.h"
95#include "libavutil/timestamp.h"
96#include "libavutil/bprint.h"
97#include "libavutil/time.h"
98#include "libavutil/thread.h"
99#include "libavutil/threadmessage.h"
100#include "libavcodec/mathops.h"
101#include "libavformat/os_support.h"
102
103# include "libavfilter/avfilter.h"
104# include "libavfilter/buffersrc.h"
105# include "libavfilter/buffersink.h"
106
107#if HAVE_SYS_RESOURCE_H
108#include <sys/time.h>
109#include <sys/types.h>
110#include <sys/resource.h>
111#elif HAVE_GETPROCESSTIMES
112#include <windows.h>
113#endif
114#if HAVE_GETPROCESSMEMORYINFO
115#include <windows.h>
116#include <psapi.h>
117#endif
118#if HAVE_SETCONSOLECTRLHANDLER
119#include <windows.h>
120#endif
121
122
123#if HAVE_SYS_SELECT_H
124#include <sys/select.h>
125#endif
126
127#if HAVE_TERMIOS_H
128#include <fcntl.h>
129#include <sys/ioctl.h>
130#include <sys/time.h>
131#include <termios.h>
132#elif HAVE_KBHIT
133#include <conio.h>
134#endif
135
136#include <time.h>
137
138#include "fftools_ffmpeg.h"
139#include "fftools_cmdutils.h"
140
141#include "libavutil/avassert.h"
142
/* Per-run video-stats log opened on demand; closed in ffmpeg_cleanup(). */
143static FILE *vstats_file;
144
/* Variable names available inside a -force_key_frames expression
   (evaluated via av_expr_*); must stay NULL-terminated. */
145const char *const forced_keyframes_const_names[] = {
146 "n",
147 "n_forced",
148 "prev_forced_n",
149 "prev_forced_t",
150 "t",
151 NULL
152};
153
/* Wall-clock / user / system CPU time snapshot used by -benchmark.
   NOTE(review): the closing "} BenchmarkTimeStamps;" line (embedded #158)
   is missing from this extract — confirm against the original file. */
154typedef struct BenchmarkTimeStamps {
155 int64_t real_usec;
156 int64_t user_usec;
157 int64_t sys_usec;
/* Forward declarations for helpers defined later in this file.
   NOTE(review): embedded lines 161 and 163 (likely the get_benchmark_time_stamps
   prototype and a blank) are missing from this extract. */
160static void do_video_stats(OutputStream *ost, int frame_size);
162static int64_t getmaxrss(void);
164
/* Per-session mutable state. FFmpegKit runs each session on its own thread,
   hence the __thread qualifiers on what upstream ffmpeg keeps as plain globals. */
165__thread int run_as_daemon = 0;
166__thread int nb_frames_dup = 0;
167__thread unsigned dup_warning = 1000;
168__thread int nb_frames_drop = 0;
169__thread int64_t decode_error_stat[2];
170__thread unsigned nb_output_dumped = 0;
171
172__thread int want_sdp = 1;
173
/* NOTE(review): embedded line 174 is missing here — verify against upstream. */
175__thread AVIOContext *progress_avio = NULL;
176
177__thread uint8_t *subtitle_out;
178
/* Global input/output stream and file tables, sized by the nb_* counters. */
179__thread InputStream **input_streams = NULL;
180__thread int nb_input_streams = 0;
181__thread InputFile **input_files = NULL;
182__thread int nb_input_files = 0;
183
184__thread OutputStream **output_streams = NULL;
185__thread int nb_output_streams = 0;
186__thread OutputFile **output_files = NULL;
187__thread int nb_output_files = 0;
188
/* NOTE(review): embedded line 189 (presumably the `filtergraphs` array
   definition referenced by ffmpeg_cleanup) is missing from this extract. */
190__thread int nb_filtergraphs;
191
/* Progress-report bookkeeping used by print_report()/forward_report(). */
192__thread int64_t last_time = -1;
193__thread int64_t keyboard_last_time = 0;
194__thread int first_report = 1;
195__thread int qp_histogram[52];
196
/* FFmpegKit statistics hook: set via set_report_callback(); invoked from
   print_report() to forward frame/fps/quality/size/time/bitrate/speed. */
197void (*report_callback)(int, float, float, int64_t, int, double, double) = NULL;
198
199extern __thread int file_overwrite;
200extern __thread int no_file_overwrite;
201extern __thread int ignore_unknown_streams;
202extern __thread int copy_unknown_streams;
203extern int opt_map(void *optctx, const char *opt, const char *arg);
204extern int opt_map_channel(void *optctx, const char *opt, const char *arg);
205extern int opt_recording_timestamp(void *optctx, const char *opt, const char *arg);
206extern int opt_data_frames(void *optctx, const char *opt, const char *arg);
207extern int opt_progress(void *optctx, const char *opt, const char *arg);
208extern int opt_target(void *optctx, const char *opt, const char *arg);
209extern int opt_vsync(void *optctx, const char *opt, const char *arg);
210extern int opt_abort_on(void *optctx, const char *opt, const char *arg);
211extern int opt_stats_period(void *optctx, const char *opt, const char *arg);
212extern int opt_qscale(void *optctx, const char *opt, const char *arg);
213extern int opt_profile(void *optctx, const char *opt, const char *arg);
214extern int opt_filter_complex(void *optctx, const char *opt, const char *arg);
215extern int opt_filter_complex_script(void *optctx, const char *opt, const char *arg);
216extern int opt_attach(void *optctx, const char *opt, const char *arg);
217extern int opt_video_frames(void *optctx, const char *opt, const char *arg);
218extern __thread int intra_only;
219extern int opt_video_codec(void *optctx, const char *opt, const char *arg);
220extern int opt_sameq(void *optctx, const char *opt, const char *arg);
221extern int opt_timecode(void *optctx, const char *opt, const char *arg);
222extern __thread int do_psnr;
223extern int opt_vstats_file(void *optctx, const char *opt, const char *arg);
224extern int opt_vstats(void *optctx, const char *opt, const char *arg);
225extern int opt_video_frames(void *optctx, const char *opt, const char *arg);
226extern int opt_old2new(void *optctx, const char *opt, const char *arg);
227extern int opt_streamid(void *optctx, const char *opt, const char *arg);
228extern int opt_bitrate(void *optctx, const char *opt, const char *arg);
229extern int show_hwaccels(void *optctx, const char *opt, const char *arg);
230extern int opt_video_filters(void *optctx, const char *opt, const char *arg);
231extern int opt_audio_frames(void *optctx, const char *opt, const char *arg);
232extern int opt_audio_qscale(void *optctx, const char *opt, const char *arg);
233extern int opt_audio_codec(void *optctx, const char *opt, const char *arg);
234extern int opt_channel_layout(void *optctx, const char *opt, const char *arg);
235extern int opt_preset(void *optctx, const char *opt, const char *arg);
236extern int opt_audio_filters(void *optctx, const char *opt, const char *arg);
237extern int opt_subtitle_codec(void *optctx, const char *opt, const char *arg);
238extern int opt_video_channel(void *optctx, const char *opt, const char *arg);
239extern int opt_video_standard(void *optctx, const char *opt, const char *arg);
240extern int opt_sdp_file(void *optctx, const char *opt, const char *arg);
241extern int opt_data_codec(void *optctx, const char *opt, const char *arg);
242extern int opt_init_hw_device(void *optctx, const char *opt, const char *arg);
243extern int opt_filter_hw_device(void *optctx, const char *opt, const char *arg);
244extern __thread int input_sync;
245
#if HAVE_TERMIOS_H
247
248/* init terminal so that we can grab keys */
/* Saved terminal attributes and a flag telling term_exit_sigsafe()
   whether they need to be restored on exit. */
249__thread struct termios oldtty;
250__thread int restore_tty;
251#endif
252
253#if HAVE_THREADS
/* Defined later in this file; joins the per-input-file reader threads. */
254static void free_input_threads(void);
255#endif
256
257extern volatile int handleSIGQUIT;
258extern volatile int handleSIGINT;
259extern volatile int handleSIGTERM;
260extern volatile int handleSIGXCPU;
261extern volatile int handleSIGPIPE;
262
263extern __thread volatile long globalSessionId;
264extern void cancelSession(long sessionId);
265extern int cancelRequested(long sessionId);
266
267/* sub2video hack:
268 Convert subtitles to video with alpha to insert them in filter graphs.
269 This is a temporary solution until libavfilter gets real subtitles support.
270 */
271
/* Allocate a fresh, fully transparent RGB32 canvas in ist->sub2video.frame,
   sized from the decoder (falling back to the configured sub2video w/h).
   Returns 0 on success or a negative AVERROR from av_frame_get_buffer().
   NOTE(review): the function signature line (embedded #272, presumably
   "static int sub2video_get_blank_frame(InputStream *ist)") is missing
   from this extract. */
273{
274 int ret;
275 AVFrame *frame = ist->sub2video.frame;
276
277 av_frame_unref(frame);
278 ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
279 ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
280 ist->sub2video.frame->format = AV_PIX_FMT_RGB32;
281 if ((ret = av_frame_get_buffer(frame, 0)) < 0)
282 return ret;
/* Zero fill = fully transparent in RGB32 (alpha byte is 0). */
283 memset(frame->data[0], 0, frame->height * frame->linesize[0]);
284 return 0;
285}
286
287static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
288 AVSubtitleRect *r)
289{
290 uint32_t *pal, *dst2;
291 uint8_t *src, *src2;
292 int x, y;
293
294 if (r->type != SUBTITLE_BITMAP) {
295 av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
296 return;
297 }
298 if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
299 av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
300 r->x, r->y, r->w, r->h, w, h
301 );
302 return;
303 }
304
305 dst += r->y * dst_linesize + r->x * 4;
306 src = r->data[0];
307 pal = (uint32_t *)r->data[1];
308 for (y = 0; y < r->h; y++) {
309 dst2 = (uint32_t *)dst;
310 src2 = src;
311 for (x = 0; x < r->w; x++)
312 *(dst2++) = pal[*(src2++)];
313 dst += dst_linesize;
314 src += r->linesize[0];
315 }
316}
317
318static void sub2video_push_ref(InputStream *ist, int64_t pts)
319{
320 AVFrame *frame = ist->sub2video.frame;
321 int i;
322 int ret;
323
324 av_assert1(frame->data[0]);
325 ist->sub2video.last_pts = frame->pts = pts;
326 for (i = 0; i < ist->nb_filters; i++) {
327 ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
328 AV_BUFFERSRC_FLAG_KEEP_REF |
329 AV_BUFFERSRC_FLAG_PUSH);
330 if (ret != AVERROR_EOF && ret < 0)
331 av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
332 av_err2str(ret));
333 }
334}
335
/* Render the given subtitle (or, when sub == NULL, a blank/held frame) onto
   the stream's sub2video canvas and push it into the filtergraphs.
   NOTE(review): this extract is incomplete — the line between embedded #361
   and #363 (the "if (sub2video_get_blank_frame(ist) < 0) {" guard whose
   closing brace appears at #366) and embedded #371 (likely the
   sub2video_push_ref() call) are missing; braces do not balance as shown. */
336void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
337{
338 AVFrame *frame = ist->sub2video.frame;
/* NOTE(review): upstream declares dst as uint8_t *; int8_t here conflicts
   with frame->data[0] and sub2video_copy_rect() — confirm against original. */
339 int8_t *dst;
340 int dst_linesize;
341 int num_rects, i;
342 int64_t pts, end_pts;
343
344 if (!frame)
345 return;
346 if (sub) {
/* Subtitle display times are in ms relative to sub->pts; rescale both
   start and end into the stream time base. */
347 pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
348 AV_TIME_BASE_Q, ist->st->time_base);
349 end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
350 AV_TIME_BASE_Q, ist->st->time_base);
351 num_rects = sub->num_rects;
352 } else {
353 /* If we are initializing the system, utilize current heartbeat
354 PTS as the start time, and show until the following subpicture
355 is received. Otherwise, utilize the previous subpicture's end time
356 as the fall-back value. */
357 pts = ist->sub2video.initialize ?
358 heartbeat_pts : ist->sub2video.end_pts;
359 end_pts = INT64_MAX;
360 num_rects = 0;
361 }
/* NOTE(review): the condition opening this error block is missing here. */
363 av_log(ist->dec_ctx, AV_LOG_ERROR,
364 "Impossible to get a blank canvas.\n");
365 return;
366 }
367 dst = frame->data [0];
368 dst_linesize = frame->linesize[0];
369 for (i = 0; i < num_rects; i++)
370 sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
372 ist->sub2video.end_pts = end_pts;
373 ist->sub2video.initialize = 0;
374}
375
376static void sub2video_heartbeat(InputStream *ist, int64_t pts)
377{
378 InputFile *infile = input_files[ist->file_index];
379 int i, j, nb_reqs;
380 int64_t pts2;
381
382 /* When a frame is read from a file, examine all sub2video streams in
383 the same file and send the sub2video frame again. Otherwise, decoded
384 video frames could be accumulating in the filter graph while a filter
385 (possibly overlay) is desperately waiting for a subtitle frame. */
386 for (i = 0; i < infile->nb_streams; i++) {
387 InputStream *ist2 = input_streams[infile->ist_index + i];
388 if (!ist2->sub2video.frame)
389 continue;
390 /* subtitles seem to be usually muxed ahead of other streams;
391 if not, subtracting a larger time here is necessary */
392 pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
393 /* do not send the heartbeat frame if the subtitle is already ahead */
394 if (pts2 <= ist2->sub2video.last_pts)
395 continue;
396 if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
397 /* if we have hit the end of the current displayed subpicture,
398 or if we need to initialize the system, update the
399 overlayed subpicture and its start/end times */
400 sub2video_update(ist2, pts2 + 1, NULL);
401 for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
402 nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
403 if (nb_reqs)
404 sub2video_push_ref(ist2, pts2);
405 }
406}
407
/* Terminate the sub2video stream: hold the last subpicture to infinity if
   one is still displayed, then send EOF (NULL frame) to every attached
   filtergraph input.
   NOTE(review): the signature line (embedded #408, presumably
   "static void sub2video_flush(InputStream *ist)") is missing here. */
409{
410 int i;
411 int ret;
412
413 if (ist->sub2video.end_pts < INT64_MAX)
414 sub2video_update(ist, INT64_MAX, NULL);
415 for (i = 0; i < ist->nb_filters; i++) {
416 ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
417 if (ret != AVERROR_EOF && ret < 0)
418 av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
419 }
420}
421
422/* end of sub2video hack */
423
/* Restore the saved terminal attributes if term_init() changed them.
   Kept minimal (tcsetattr only) so it is safe to call from a signal
   handler — hence the _sigsafe suffix. */
424static void term_exit_sigsafe(void)
425{
426#if HAVE_TERMIOS_H
427 if(restore_tty)
428 tcsetattr (0, TCSANOW, &oldtty);
429#endif
430}
431
/* Public terminal teardown: silence the logger, then restore the tty.
   NOTE(review): embedded line #435 (presumably the term_exit_sigsafe()
   call) is missing from this extract. */
432void term_exit(void)
433{
434 av_log(NULL, AV_LOG_QUIET, "%s", "");
436}
437
/* Signal/exit bookkeeping. received_* are written from signal handlers,
   hence volatile; transcode_init_done gates decode_interrupt_cb(). */
438static volatile int received_sigterm = 0;
439static volatile int received_nb_signals = 0;
440__thread atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
441__thread volatile int ffmpeg_exited = 0;
442__thread volatile int main_ffmpeg_return_code = 0;
443__thread int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
/* Exit code captured by exit_program()'s setjmp/longjmp mechanism. */
444extern __thread volatile int longjmp_value;
445
/* Signal handler: records which signal arrived so the main loop can wind
   down gracefully.
   NOTE(review): this extract is incomplete — embedded line #447 (the
   "sigterm_handler(int sig)" name/parameter line) and #450-#451 (likely
   received_nb_signals++ and the hard-exit escalation) are missing. */
446static void
448{
449 received_sigterm = sig;
452}
453
454#if HAVE_SETCONSOLECTRLHANDLER
455static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
456{
457 av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
458
459 switch (fdwCtrlType)
460 {
461 case CTRL_C_EVENT:
462 case CTRL_BREAK_EVENT:
463 sigterm_handler(SIGINT);
464 return TRUE;
465
466 case CTRL_CLOSE_EVENT:
467 case CTRL_LOGOFF_EVENT:
468 case CTRL_SHUTDOWN_EVENT:
469 sigterm_handler(SIGTERM);
470 /* Basically, with these 3 events, when we return from this method the
471 process is hard terminated, so stall as long as we need to
472 to try and let the main thread(s) clean up and gracefully terminate
473 (we have at most 5 seconds, but should be done far before that). */
474 while (!ffmpeg_exited) {
475 Sleep(0);
476 }
477 return TRUE;
478
479 default:
480 av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
481 return FALSE;
482 }
483}
484#endif
485
/* Install a signal handler. On Linux this expands to sigaction() and relies
   on a `struct sigaction action` local being in scope at the expansion site
   (see term_init()); elsewhere it falls back to plain signal(). */
486#ifdef __linux__
487#define SIGNAL(sig, func) \
488 do { \
489 action.sa_handler = func; \
490 sigaction(sig, &action, NULL); \
491 } while (0)
492#else
493#define SIGNAL(sig, func) \
494 signal(sig, func)
495#endif
496
/* Set up terminal raw mode (for interactive key handling) and install the
   signal handlers FFmpegKit was configured to manage (handleSIG* flags).
   NOTE(review): embedded line #516 is missing from this extract — it is the
   opening "if (...) {" (likely `if (!run_as_daemon && stdin_interaction) {`)
   whose closing brace appears at #536; braces do not balance as shown. */
497void term_init(void)
498{
499#if defined __linux__
/* {0} vs {{0}}: sigaction's first member differs across ABIs; both forms
   zero-initialize the whole struct. */
500 #if defined __aarch64__ || defined __amd64__ || defined __x86_64__
501 struct sigaction action = {0};
502 #else
503 struct sigaction action = {{0}};
504 #endif
505
506 action.sa_handler = sigterm_handler;
507
508 /* block other interrupts while processing this one */
509 sigfillset(&action.sa_mask);
510
511 /* restart interruptible functions (i.e. don't fail with EINTR) */
512 action.sa_flags = SA_RESTART;
513#endif
514
515#if HAVE_TERMIOS_H
517 struct termios tty;
518 if (tcgetattr (0, &tty) == 0) {
/* Remember original settings so term_exit_sigsafe() can restore them. */
519 oldtty = tty;
520 restore_tty = 1;
521
/* Switch stdin to raw, non-echoing single-character mode. */
522 tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
523 |INLCR|IGNCR|ICRNL|IXON);
524 tty.c_oflag |= OPOST;
525 tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
526 tty.c_cflag &= ~(CSIZE|PARENB);
527 tty.c_cflag |= CS8;
528 tty.c_cc[VMIN] = 1;
529 tty.c_cc[VTIME] = 0;
530
531 tcsetattr (0, TCSANOW, &tty);
532 }
533 if (handleSIGQUIT == 1) {
534 signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
535 }
536 }
537#endif
538
539 if (handleSIGINT == 1) {
540 signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
541 }
542 if (handleSIGTERM == 1) {
543 signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
544 }
545#ifdef SIGXCPU
546 if (handleSIGXCPU == 1) {
547 signal(SIGXCPU, sigterm_handler);
548 }
549#endif
550#ifdef SIGPIPE
/* Broken pipes must not kill the session; writes will fail with EPIPE. */
551 if (handleSIGPIPE == 1) {
552 signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
553 }
554#endif
555#if HAVE_SETCONSOLECTRLHANDLER
556 SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
557#endif
558}
559
560/* read a key without blocking */
/* Returns the key byte read from stdin, 0/-1 style select()/read() results
   when nothing is available, or -1 when no console input exists at all.
   Platform-specific: termios+select on POSIX, PeekNamedPipe/kbhit on Windows. */
561static int read_key(void)
562{
563 unsigned char ch;
564#if HAVE_TERMIOS_H
565 int n = 1;
566 struct timeval tv;
567 fd_set rfds;
568
/* Zero timeout => pure poll of fd 0, never blocks. */
569 FD_ZERO(&rfds);
570 FD_SET(0, &rfds);
571 tv.tv_sec = 0;
572 tv.tv_usec = 0;
573 n = select(1, &rfds, NULL, NULL, &tv);
574 if (n > 0) {
575 n = read(0, &ch, 1);
576 if (n == 1)
577 return ch;
578
579 return n;
580 }
581#elif HAVE_KBHIT
582# if HAVE_PEEKNAMEDPIPE
583 static int is_pipe;
584 static HANDLE input_handle;
585 DWORD dw, nchars;
/* Lazily detect whether stdin is a real console or a pipe (GUI launch). */
586 if(!input_handle){
587 input_handle = GetStdHandle(STD_INPUT_HANDLE);
588 is_pipe = !GetConsoleMode(input_handle, &dw);
589 }
590
591 if (is_pipe) {
592 /* When running under a GUI, you will end here. */
593 if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
594 // input pipe may have been closed by the program that ran ffmpeg
595 return -1;
596 }
597 //Read it
598 if(nchars != 0) {
599 read(0, &ch, 1);
600 return ch;
601 }else{
602 return -1;
603 }
604 }
605# endif
606 if(kbhit())
607 return(getch());
608#endif
609 return -1;
610}
611
/* AVIO interrupt callback: non-zero aborts blocking libav* I/O. Trips once
   more signals have been received than transcode init phases completed.
   NOTE(review): the definition header line (embedded #614, presumably
   "int decode_interrupt_cb(void *ctx)") is missing from this extract. */
612int decode_interrupt_cb(void *ctx);
613
615{
616 return received_nb_signals > atomic_load(&transcode_init_done);
617}
618
/* Passed to avformat/avio so network and file I/O honor cancellation. */
619__thread const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
620
/* Release everything a session allocated: filtergraphs (draining their
   queued frames/subtitles), output files and streams, input files and
   streams, the vstats file, option state and the network layer. `ret` is
   the session's exit status, used only for the final log line.
   NOTE(review): this extract is missing embedded lines #692, #710 and #738
   (by context, the "OutputStream *ost = output_streams[i];" /
   "InputStream *ist = input_streams[i];" declarations and one ost-field
   free); the ost/ist uses below depend on them. */
621static void ffmpeg_cleanup(int ret)
622{
623 int i, j;
624
625 if (do_benchmark) {
626 int maxrss = getmaxrss() / 1024;
627 av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
628 }
629
/* Tear down every filtergraph, draining per-input frame and subtitle
   queues before freeing the containers themselves. */
630 for (i = 0; i < nb_filtergraphs; i++) {
631 FilterGraph *fg = filtergraphs[i];
632 avfilter_graph_free(&fg->graph);
633 for (j = 0; j < fg->nb_inputs; j++) {
634 InputFilter *ifilter = fg->inputs[j];
635 struct InputStream *ist = ifilter->ist;
636
637 while (av_fifo_size(ifilter->frame_queue)) {
638 AVFrame *frame;
639 av_fifo_generic_read(ifilter->frame_queue, &frame,
640 sizeof(frame), NULL);
641 av_frame_free(&frame);
642 }
643 av_fifo_freep(&ifilter->frame_queue);
644 if (ist->sub2video.sub_queue) {
645 while (av_fifo_size(ist->sub2video.sub_queue)) {
646 AVSubtitle sub;
647 av_fifo_generic_read(ist->sub2video.sub_queue,
648 &sub, sizeof(sub), NULL);
649 avsubtitle_free(&sub);
650 }
651 av_fifo_freep(&ist->sub2video.sub_queue);
652 }
653 av_buffer_unref(&ifilter->hw_frames_ctx);
654 av_freep(&ifilter->name);
655 av_freep(&fg->inputs[j]);
656 }
657 av_freep(&fg->inputs);
658 for (j = 0; j < fg->nb_outputs; j++) {
659 OutputFilter *ofilter = fg->outputs[j];
660
661 avfilter_inout_free(&ofilter->out_tmp);
662 av_freep(&ofilter->name);
663 av_freep(&ofilter->formats);
664 av_freep(&ofilter->channel_layouts);
665 av_freep(&ofilter->sample_rates);
666 av_freep(&fg->outputs[j]);
667 }
668 av_freep(&fg->outputs);
669 av_freep(&fg->graph_desc);
670
671 av_freep(&filtergraphs[i]);
672 }
673 av_freep(&filtergraphs);
674
675 av_freep(&subtitle_out);
676
677 /* close files */
678 for (i = 0; i < nb_output_files; i++) {
679 OutputFile *of = output_files[i];
680 AVFormatContext *s;
681 if (!of)
682 continue;
683 s = of->ctx;
/* Only close the AVIO handle for muxers that actually own a file. */
684 if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
685 avio_closep(&s->pb);
686 avformat_free_context(s);
687 av_dict_free(&of->opts);
688
689 av_freep(&output_files[i]);
690 }
691 for (i = 0; i < nb_output_streams; i++) {
/* NOTE(review): ost declaration line (embedded #692) missing here. */
693
694 if (!ost)
695 continue;
696
697 av_bsf_free(&ost->bsf_ctx);
698
699 av_frame_free(&ost->filtered_frame);
700 av_frame_free(&ost->last_frame);
701 av_packet_free(&ost->pkt);
702 av_dict_free(&ost->encoder_opts);
703
704 av_freep(&ost->forced_keyframes);
705 av_expr_free(ost->forced_keyframes_pexpr);
706 av_freep(&ost->avfilter);
707 av_freep(&ost->logfile_prefix);
708
709 av_freep(&ost->audio_channels_map);
/* NOTE(review): embedded line #710 missing here. */
711
712 av_dict_free(&ost->sws_dict);
713 av_dict_free(&ost->swr_opts);
714
715 avcodec_free_context(&ost->enc_ctx);
716 avcodec_parameters_free(&ost->ref_par);
717
/* Drop any packets still buffered for a muxer that never wrote a header. */
718 if (ost->muxing_queue) {
719 while (av_fifo_size(ost->muxing_queue)) {
720 AVPacket *pkt;
721 av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
722 av_packet_free(&pkt);
723 }
724 av_fifo_freep(&ost->muxing_queue);
725 }
726
727 av_freep(&output_streams[i]);
728 }
729#if HAVE_THREADS
730 free_input_threads();
731#endif
732 for (i = 0; i < nb_input_files; i++) {
733 avformat_close_input(&input_files[i]->ctx);
734 av_packet_free(&input_files[i]->pkt);
735 av_freep(&input_files[i]);
736 }
737 for (i = 0; i < nb_input_streams; i++) {
/* NOTE(review): ist declaration line (embedded #738) missing here. */
739
740 av_frame_free(&ist->decoded_frame);
741 av_frame_free(&ist->filter_frame);
742 av_packet_free(&ist->pkt);
743 av_dict_free(&ist->decoder_opts);
744 avsubtitle_free(&ist->prev_sub.subtitle);
745 av_frame_free(&ist->sub2video.frame);
746 av_freep(&ist->filters);
747 av_freep(&ist->hwaccel_device);
748 av_freep(&ist->dts_buffer);
749
750 avcodec_free_context(&ist->dec_ctx);
751
752 av_freep(&input_streams[i]);
753 }
754
755 if (vstats_file) {
756 if (fclose(vstats_file))
757 av_log(NULL, AV_LOG_ERROR,
758 "Error closing vstats file, loss of information possible: %s\n",
759 av_err2str(AVERROR(errno)));
760 }
761 av_freep(&vstats_filename);
762
763 av_freep(&input_streams);
764 av_freep(&input_files);
765 av_freep(&output_streams);
766 av_freep(&output_files);
767
768 uninit_opts();
769
770 avformat_network_deinit();
771
772 if (received_sigterm) {
773 av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
774 (int) received_sigterm);
775 } else if (cancelRequested(globalSessionId)) {
776 av_log(NULL, AV_LOG_INFO, "Exiting normally, received cancel request.\n");
777 } else if (ret && atomic_load(&transcode_init_done)) {
778 av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
779 }
780 term_exit();
781 ffmpeg_exited = 1;
782}
783
/* Remove from *a every key that also appears in b (exact-case match).
   Used to strip options that one component consumed from another's dict. */
784void remove_avoptions(AVDictionary **a, AVDictionary *b)
785{
786 AVDictionaryEntry *t = NULL;
787
/* "" + AV_DICT_IGNORE_SUFFIX iterates over every entry of b. */
788 while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
789 av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);
790 }
791}
792
793void assert_avoptions(AVDictionary *m)
794{
795 AVDictionaryEntry *t;
796 if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
797 av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
798 exit_program(1);
799 }
800}
801
/* Called when a codec requires -strict experimental but it was not given.
   Terminates the session via exit_program(); the codec and encoder/decoder
   flag parameters are unused in this build (upstream logs them first —
   NOTE(review): the diagnostic body may have been lost in this extract). */
802static void abort_codec_experimental(const AVCodec *c, int encoder)
803{
804 exit_program(1);
805}
806
/* With -benchmark_all, log the user/sys/real time consumed since the last
   call under the printf-style label fmt, then reset the reference point.
   NOTE(review): this extract is missing embedded line #810 (the local
   timestamp snapshot, by context "BenchmarkTimeStamps t = ...;" — `t` is
   used at #824) and #820-#822 (the av_log value arguments computing the
   user/sys/real deltas). */
807static void update_benchmark(const char *fmt, ...)
808{
809 if (do_benchmark_all) {
811 va_list va;
812 char buf[1024];
813
814 if (fmt) {
815 va_start(va, fmt);
816 vsnprintf(buf, sizeof(buf), fmt, va);
817 va_end(va);
818 av_log(NULL, AV_LOG_INFO,
819 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
823 }
824 current_time = t;
825 }
826}
827
/* Mark every output stream finished: `ost` itself gets the `this_stream`
   flags, all other streams get `others` (OR-ed into ->finished).
   NOTE(review): the signature line (embedded #828, presumably
   "static void close_all_output_streams(OutputStream *ost, OSTFinished
   this_stream, OSTFinished others)") is missing from this extract. */
829{
830 int i;
831 for (i = 0; i < nb_output_streams; i++) {
832 OutputStream *ost2 = output_streams[i];
833 ost2->finished |= ost == ost2 ? this_stream : others;
834 }
835}
836
/* Hand one packet to the muxer: enforces -frames limits, buffers packets
   while the muxer header is not yet written, rescales timestamps into the
   stream time base, repairs non-monotonic DTS, and finally interleaves the
   packet into the output. The packet is consumed in all paths.
   NOTE(review): this extract is missing several embedded lines — #864
   (second half of the are_we_over_size expression), the FFMIN upper bound
   line between #866 and #868, #914 (the av_rescale_q closing argument),
   #960 (a stats counter update) and #977-#978 (the error-path body after
   print_error, by context setting main_ffmpeg_return_code). Brace balance
   and several expressions below depend on those lines. */
838static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
839{
840 AVFormatContext *s = of->ctx;
841 AVStream *st = ost->st;
842 int ret;
843
844 /*
845 * Audio encoders may split the packets -- #frames in != #packets out.
846 * But there is no reordering, so we can limit the number of output packets
847 * by simply dropping them here.
848 * Counting encoded video frames needs to be done separately because of
849 * reordering, see do_video_out().
850 * Do not count the packet when unqueued because it has been counted when queued.
851 */
852 if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
853 if (ost->frame_number >= ost->max_frames) {
854 av_packet_unref(pkt);
855 return;
856 }
857 ost->frame_number++;
858 }
859
860 if (!of->header_written) {
861 AVPacket *tmp_pkt;
862 /* the muxer is not initialized yet, buffer the packet */
863 if (!av_fifo_space(ost->muxing_queue)) {
864 unsigned int are_we_over_size =
865 int new_size = are_we_over_size ?
866 FFMIN(2 * av_fifo_size(ost->muxing_queue),
868 2 * av_fifo_size(ost->muxing_queue);
869
870 if (new_size <= av_fifo_size(ost->muxing_queue)) {
871 av_log(NULL, AV_LOG_ERROR,
872 "Too many packets buffered for output stream %d:%d.\n",
873 ost->file_index, ost->st->index);
874 exit_program(1);
875 }
876 ret = av_fifo_realloc2(ost->muxing_queue, new_size);
877 if (ret < 0)
878 exit_program(1);
879 }
880 ret = av_packet_make_refcounted(pkt);
881 if (ret < 0)
882 exit_program(1);
883 tmp_pkt = av_packet_alloc();
884 if (!tmp_pkt)
885 exit_program(1);
/* Ownership of the payload moves into the queued packet. */
886 av_packet_move_ref(tmp_pkt, pkt);
887 ost->muxing_queue_data_size += tmp_pkt->size;
888 av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
889 return;
890 }
891
892 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
893 (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
894 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
895
896 if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
897 int i;
/* Encoder-attached quality/error stats ride along as packet side data. */
898 uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
899 NULL);
900 ost->quality = sd ? AV_RL32(sd) : -1;
901 ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
902
903 for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
904 if (sd && i < sd[5])
905 ost->error[i] = AV_RL64(sd + 8 + 8*i);
906 else
907 ost->error[i] = -1;
908 }
909
910 if (ost->frame_rate.num && ost->is_cfr) {
911 if (pkt->duration > 0)
912 av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
913 pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
915 }
916 }
917
918 av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
919
920 if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
921 if (pkt->dts != AV_NOPTS_VALUE &&
922 pkt->pts != AV_NOPTS_VALUE &&
923 pkt->dts > pkt->pts) {
924 av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
925 pkt->dts, pkt->pts,
926 ost->file_index, ost->st->index);
/* Replace both stamps with the median of {pts, dts, last_dts+1}. */
927 pkt->pts =
928 pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
929 - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
930 - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
931 }
932 if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
933 pkt->dts != AV_NOPTS_VALUE &&
934 !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
935 ost->last_mux_dts != AV_NOPTS_VALUE) {
/* Strict muxers require strictly increasing DTS; others allow equal. */
936 int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
937 if (pkt->dts < max) {
938 int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
939 if (exit_on_error)
940 loglevel = AV_LOG_ERROR;
941 av_log(s, loglevel, "Non-monotonous DTS in output stream "
942 "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
943 ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
944 if (exit_on_error) {
945 av_log(NULL, AV_LOG_FATAL, "aborting.\n");
946 exit_program(1);
947 }
948 av_log(s, loglevel, "changing to %"PRId64". This may result "
949 "in incorrect timestamps in the output file.\n",
950 max);
951 if (pkt->pts >= pkt->dts)
952 pkt->pts = FFMAX(pkt->pts, max);
953 pkt->dts = max;
954 }
955 }
956 }
957 ost->last_mux_dts = pkt->dts;
958
959 ost->data_size += pkt->size;
961
962 pkt->stream_index = ost->index;
963
964 if (debug_ts) {
965 av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
966 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
967 av_get_media_type_string(ost->enc_ctx->codec_type),
968 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
969 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
970 pkt->size
971 );
972 }
973
974 ret = av_interleaved_write_frame(s, pkt);
975 if (ret < 0) {
976 print_error("av_interleaved_write_frame()", ret);
979 }
980 av_packet_unref(pkt);
981}
982
/* Mark an output stream finished; with -shortest, also clamp the file's
   recording_time to this stream's current position so the other streams
   stop at the same point.
   NOTE(review): this extract is missing the signature line (embedded #983,
   presumably "static void close_output_stream(OutputStream *ost)") plus
   #985 ("OutputFile *of = output_files[ost->file_index];") and #987
   (the ->finished flag update). */
984{
986
988 if (of->shortest) {
989 int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
990 of->recording_time = FFMIN(of->recording_time, end);
991 }
992}
993
994/*
995 * Send a single packet to the output, applying any bitstream filters
996 * associated with the output stream. This may result in any number
997 * of packets actually being written, depending on what bitstream
998 * filters are applied. The supplied packet is consumed and will be
999 * blank (as if newly-allocated) when this function returns.
1000 *
1001 * If eof is set, instead indicate EOF to all bitstream filters and
1002 * therefore flush any delayed packets to the output. A blank packet
1003 * must be supplied in this case.
1004 */
1005static void output_packet(OutputFile *of, AVPacket *pkt,
1006 OutputStream *ost, int eof)
1007{
1008 int ret = 0;
1009
1010 /* apply the output bitstream filters */
1011 if (ost->bsf_ctx) {
1012 ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
1013 if (ret < 0)
1014 goto finish;
1015 while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
1016 write_packet(of, pkt, ost, 0);
1017 if (ret == AVERROR(EAGAIN))
1018 ret = 0;
1019 } else if (!eof)
1020 write_packet(of, pkt, ost, 0);
1021
1022finish:
1023 if (ret < 0 && ret != AVERROR_EOF) {
1024 av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
1025 "packet for stream #%d:%d.\n", ost->file_index, ost->index);
1026 if(exit_on_error)
1027 exit_program(1);
1028 }
1029}
1030
/* Returns 0 once the stream has reached the output file's -t recording
   limit (closing the stream in that case), 1 while encoding may continue.
   NOTE(review): this extract is missing the signature line (embedded #1031,
   presumably "static int check_recording_time(OutputStream *ost)"), #1033
   ("OutputFile *of = output_files[ost->file_index];") and #1038 (by
   context the close_output_stream(ost) call inside the if-body). */
1032{
1034
1035 if (of->recording_time != INT64_MAX &&
1036 av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, of->recording_time,
1037 AV_TIME_BASE_Q) >= 0) {
1039 return 0;
1040 }
1041 return 1;
1042}
1043
/*
 * Rescale frame->pts from the buffersink time base to the encoder time base,
 * subtracting the output file's start-time offset (-ss).  Returns the same
 * adjusted timestamp as a double carrying extra fractional precision, or
 * AV_NOPTS_VALUE when no adjustment is possible.
 */
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
{
    double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
    AVCodecContext *enc = ost->enc_ctx;
    // nothing to do without a frame, a usable pts, an encoder context and a
    // configured filter graph
    if (!frame || frame->pts == AV_NOPTS_VALUE ||
        !enc || !ost->filter || !ost->filter->graph->graph)
        goto early_exit;

    {
        AVFilterContext *filter = ost->filter->filter;

        // AV_NOPTS_VALUE means no -ss offset was requested
        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
        AVRational filter_tb = av_buffersink_get_time_base(filter);
        AVRational tb = enc->time_base;
        // widen the time-base denominator by up to 16 bits so the double
        // result keeps sub-tick precision after the integer rescale
        int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

        tb.den <<= extra_bits;
        float_pts =
            av_rescale_q(frame->pts, filter_tb, tb) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
        float_pts /= 1 << extra_bits;
        // avoid exact midpoints to reduce the chance of rounding differences,
        // this can be removed in case the fps code is changed to work with integers
        float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

        // the frame itself gets the plain integer-precision rescale
        frame->pts =
            av_rescale_q(frame->pts, filter_tb, enc->time_base) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
    }

early_exit:

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
               frame ? av_ts2str(frame->pts) : "NULL",
               frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
               float_pts,
               enc ? enc->time_base.num : -1,
               enc ? enc->time_base.den : -1);
    }

    return float_pts;
}
1086
1087static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len);
1088
1089static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
1090{
1091 int ret = AVERROR_BUG;
1092 char error[1024] = {0};
1093
1094 if (ost->initialized)
1095 return 0;
1096
1097 ret = init_output_stream(ost, frame, error, sizeof(error));
1098 if (ret < 0) {
1099 av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1100 ost->file_index, ost->index, error);
1101
1102 if (fatal)
1103 exit_program(1);
1104 }
1105
1106 return ret;
1107}
1108
1110 AVFrame *frame)
1111{
1112 AVCodecContext *enc = ost->enc_ctx;
1113 AVPacket *pkt = ost->pkt;
1114 int ret;
1115
1117
1119 return;
1120
1121 if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
1122 frame->pts = ost->sync_opts;
1123 ost->sync_opts = frame->pts + frame->nb_samples;
1124 ost->samples_encoded += frame->nb_samples;
1126
1127 update_benchmark(NULL);
1128 if (debug_ts) {
1129 av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
1130 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1131 av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
1132 enc->time_base.num, enc->time_base.den);
1133 }
1134
1135 ret = avcodec_send_frame(enc, frame);
1136 if (ret < 0)
1137 goto error;
1138
1139 while (1) {
1140 av_packet_unref(pkt);
1141 ret = avcodec_receive_packet(enc, pkt);
1142 if (ret == AVERROR(EAGAIN))
1143 break;
1144 if (ret < 0)
1145 goto error;
1146
1147 update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
1148
1149 av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
1150
1151 if (debug_ts) {
1152 av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
1153 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1154 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
1155 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base));
1156 }
1157
1158 output_packet(of, pkt, ost, 0);
1159 }
1160
1161 return;
1162error:
1163 av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
1164 exit_program(1);
1165}
1166
1169 AVSubtitle *sub)
1170{
1171 int subtitle_out_max_size = 1024 * 1024;
1172 int subtitle_out_size, nb, i;
1173 AVCodecContext *enc;
1174 AVPacket *pkt = ost->pkt;
1175 int64_t pts;
1176
1177 if (sub->pts == AV_NOPTS_VALUE) {
1178 av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
1179 if (exit_on_error)
1180 exit_program(1);
1181 return;
1182 }
1183
1184 enc = ost->enc_ctx;
1185
1186 if (!subtitle_out) {
1187 subtitle_out = av_malloc(subtitle_out_max_size);
1188 if (!subtitle_out) {
1189 av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
1190 exit_program(1);
1191 }
1192 }
1193
1194 /* Note: DVB subtitle need one packet to draw them and one other
1195 packet to clear them */
1196 /* XXX: signal it in the codec context ? */
1197 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1198 nb = 2;
1199 else
1200 nb = 1;
1201
1202 /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1203 pts = sub->pts;
1204 if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1206 for (i = 0; i < nb; i++) {
1207 unsigned save_num_rects = sub->num_rects;
1208
1209 ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1211 return;
1212
1213 sub->pts = pts;
1214 // start_display_time is required to be 0
1215 sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1216 sub->end_display_time -= sub->start_display_time;
1217 sub->start_display_time = 0;
1218 if (i == 1)
1219 sub->num_rects = 0;
1220
1222
1223 subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1224 subtitle_out_max_size, sub);
1225 if (i == 1)
1226 sub->num_rects = save_num_rects;
1227 if (subtitle_out_size < 0) {
1228 av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1229 exit_program(1);
1230 }
1231
1232 av_packet_unref(pkt);
1233 pkt->data = subtitle_out;
1234 pkt->size = subtitle_out_size;
1235 pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1236 pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1237 if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1238 /* XXX: the pts correction is handled here. Maybe handling
1239 it in the codec would be better */
1240 if (i == 0)
1241 pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1242 else
1243 pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1244 }
1245 pkt->dts = pkt->pts;
1246 output_packet(of, pkt, ost, 0);
1247 }
1248}
1249
1250static void do_video_out(OutputFile *of,
1252 AVFrame *next_picture)
1253{
1254 int ret, format_video_sync;
1255 AVPacket *pkt = ost->pkt;
1256 AVCodecContext *enc = ost->enc_ctx;
1257 AVRational frame_rate;
1258 int nb_frames, nb0_frames, i;
1259 double delta, delta0;
1260 double duration = 0;
1261 double sync_ipts = AV_NOPTS_VALUE;
1262 int frame_size = 0;
1263 InputStream *ist = NULL;
1264 AVFilterContext *filter = ost->filter->filter;
1265
1266 init_output_stream_wrapper(ost, next_picture, 1);
1267 sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);
1268
1269 if (ost->source_index >= 0)
1271
1272 frame_rate = av_buffersink_get_frame_rate(filter);
1273 if (frame_rate.num > 0 && frame_rate.den > 0)
1274 duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1275
1276 if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1277 duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1278
1279 if (!ost->filters_script &&
1280 !ost->filters &&
1281 (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1282 next_picture &&
1283 ist &&
1284 lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1285 duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1286 }
1287
1288 if (!next_picture) {
1289 //end, flushing
1290 nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1291 ost->last_nb0_frames[1],
1292 ost->last_nb0_frames[2]);
1293 } else {
1294 delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1295 delta = delta0 + duration;
1296
1297 /* by default, we output a single frame */
1298 nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1299 nb_frames = 1;
1300
1301 format_video_sync = video_sync_method;
1302 if (format_video_sync == VSYNC_AUTO) {
1303 if(!strcmp(of->ctx->oformat->name, "avi")) {
1304 format_video_sync = VSYNC_VFR;
1305 } else
1306 format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1307 if ( ist
1308 && format_video_sync == VSYNC_CFR
1309 && input_files[ist->file_index]->ctx->nb_streams == 1
1310 && input_files[ist->file_index]->input_ts_offset == 0) {
1311 format_video_sync = VSYNC_VSCFR;
1312 }
1313 if (format_video_sync == VSYNC_CFR && copy_ts) {
1314 format_video_sync = VSYNC_VSCFR;
1315 }
1316 }
1317 ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1318
1319 if (delta0 < 0 &&
1320 delta > 0 &&
1321 format_video_sync != VSYNC_PASSTHROUGH &&
1322 format_video_sync != VSYNC_DROP) {
1323 if (delta0 < -0.6) {
1324 av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1325 } else
1326 av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1327 sync_ipts = ost->sync_opts;
1328 duration += delta0;
1329 delta0 = 0;
1330 }
1331
1332 switch (format_video_sync) {
1333 case VSYNC_VSCFR:
1334 if (ost->frame_number == 0 && delta0 >= 0.5) {
1335 av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1336 delta = duration;
1337 delta0 = 0;
1338 ost->sync_opts = llrint(sync_ipts);
1339 }
1340 case VSYNC_CFR:
1341 // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1342 if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1343 nb_frames = 0;
1344 } else if (delta < -1.1)
1345 nb_frames = 0;
1346 else if (delta > 1.1) {
1347 nb_frames = lrintf(delta);
1348 if (delta0 > 1.1)
1349 nb0_frames = llrintf(delta0 - 0.6);
1350 }
1351 break;
1352 case VSYNC_VFR:
1353 if (delta <= -0.6)
1354 nb_frames = 0;
1355 else if (delta > 0.6)
1356 ost->sync_opts = llrint(sync_ipts);
1357 break;
1358 case VSYNC_DROP:
1359 case VSYNC_PASSTHROUGH:
1360 ost->sync_opts = llrint(sync_ipts);
1361 break;
1362 default:
1363 av_assert0(0);
1364 }
1365 }
1366
1367 nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1368 nb0_frames = FFMIN(nb0_frames, nb_frames);
1369
1370 memmove(ost->last_nb0_frames + 1,
1372 sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1373 ost->last_nb0_frames[0] = nb0_frames;
1374
1375 if (nb0_frames == 0 && ost->last_dropped) {
1377 av_log(NULL, AV_LOG_VERBOSE,
1378 "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1379 ost->frame_number, ost->st->index, ost->last_frame->pts);
1380 }
1381 if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1382 if (nb_frames > dts_error_threshold * 30) {
1383 av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1385 return;
1386 }
1387 nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1388 av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1389 if (nb_frames_dup > dup_warning) {
1390 av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1391 dup_warning *= 10;
1392 }
1393 }
1394 ost->last_dropped = nb_frames == nb0_frames && next_picture;
1395 ost->dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;
1396
1397 /* duplicates frame if needed */
1398 for (i = 0; i < nb_frames; i++) {
1399 AVFrame *in_picture;
1400 int forced_keyframe = 0;
1401 double pts_time;
1402
1403 if (i < nb0_frames && ost->last_frame) {
1404 in_picture = ost->last_frame;
1405 } else
1406 in_picture = next_picture;
1407
1408 if (!in_picture)
1409 return;
1410
1411 in_picture->pts = ost->sync_opts;
1412
1414 return;
1415
1416 in_picture->quality = enc->global_quality;
1417 in_picture->pict_type = 0;
1418
1419 if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1420 in_picture->pts != AV_NOPTS_VALUE)
1421 ost->forced_kf_ref_pts = in_picture->pts;
1422
1423 pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1424 (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1426 in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1428 forced_keyframe = 1;
1429 } else if (ost->forced_keyframes_pexpr) {
1430 double res;
1432 res = av_expr_eval(ost->forced_keyframes_pexpr,
1434 ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1440 res);
1441 if (res) {
1442 forced_keyframe = 1;
1448 }
1449
1451 } else if ( ost->forced_keyframes
1452 && !strncmp(ost->forced_keyframes, "source", 6)
1453 && in_picture->key_frame==1
1454 && !i) {
1455 forced_keyframe = 1;
1456 } else if ( ost->forced_keyframes
1457 && !strncmp(ost->forced_keyframes, "source_no_drop", 14)
1458 && !i) {
1459 forced_keyframe = (in_picture->key_frame == 1) || ost->dropped_keyframe;
1460 ost->dropped_keyframe = 0;
1461 }
1462
1463 if (forced_keyframe) {
1464 in_picture->pict_type = AV_PICTURE_TYPE_I;
1465 av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1466 }
1467
1468 update_benchmark(NULL);
1469 if (debug_ts) {
1470 av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1471 "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1472 av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1473 enc->time_base.num, enc->time_base.den);
1474 }
1475
1477
1478 ret = avcodec_send_frame(enc, in_picture);
1479 if (ret < 0)
1480 goto error;
1481 // Make sure Closed Captions will not be duplicated
1482 av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);
1483
1484 while (1) {
1485 av_packet_unref(pkt);
1486 ret = avcodec_receive_packet(enc, pkt);
1487 update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1488 if (ret == AVERROR(EAGAIN))
1489 break;
1490 if (ret < 0)
1491 goto error;
1492
1493 if (debug_ts) {
1494 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1495 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1496 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
1497 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base));
1498 }
1499
1500 if (pkt->pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1501 pkt->pts = ost->sync_opts;
1502
1503 av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
1504
1505 if (debug_ts) {
1506 av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1507 "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1508 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->mux_timebase),
1509 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->mux_timebase));
1510 }
1511
1512 frame_size = pkt->size;
1513 output_packet(of, pkt, ost, 0);
1514
1515 /* if two pass, output log */
1516 if (ost->logfile && enc->stats_out) {
1517 fprintf(ost->logfile, "%s", enc->stats_out);
1518 }
1519 }
1520 ost->sync_opts++;
1521 /*
1522 * For video, number of frames in == number of packets out.
1523 * But there may be reordering, so we can't throw away frames on encoder
1524 * flush, we need to limit them here, before they go into encoder.
1525 */
1526 ost->frame_number++;
1527
1528 if (vstats_filename && frame_size)
1529 do_video_stats(ost, frame_size);
1530 }
1531
1532 if (!ost->last_frame)
1533 ost->last_frame = av_frame_alloc();
1534 av_frame_unref(ost->last_frame);
1535 if (next_picture && ost->last_frame)
1536 av_frame_ref(ost->last_frame, next_picture);
1537 else
1538 av_frame_free(&ost->last_frame);
1539
1540 return;
1541error:
1542 av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1543 exit_program(1);
1544}
1545
1546static double psnr(double d)
1547{
1548 return -10.0 * log10(d);
1549}
1550
/*
 * Append one record describing the just-encoded video frame (size, quality,
 * optional PSNR, running bitrate) to the -vstats log file.  The file is
 * opened lazily on the first call; failure to open it is fatal.
 */
static void do_video_stats(OutputStream *ost, int frame_size)
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        // vstats_version 2 additionally records the output file and stream indices
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        } else  {
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        }

        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        // clamp to avoid dividing by (near) zero in avg_bitrate below
        if (ti1 < 0.01)
            ti1 = 0.01;

        // instantaneous bitrate: bits of this frame over one encoder tick
        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        // average bitrate: all bytes written so far over elapsed stream time
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
               (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1593
1595{
1597 int i;
1598
1600
1601 if (of->shortest) {
1602 for (i = 0; i < of->ctx->nb_streams; i++)
1604 }
1605}
1606
1613static int reap_filters(int flush)
1614{
1615 AVFrame *filtered_frame = NULL;
1616 int i;
1617
1618 /* Reap all buffers present in the buffer sinks */
1619 for (i = 0; i < nb_output_streams; i++) {
1622 AVFilterContext *filter;
1623 AVCodecContext *enc = ost->enc_ctx;
1624 int ret = 0;
1625
1626 if (!ost->filter || !ost->filter->graph->graph)
1627 continue;
1628 filter = ost->filter->filter;
1629
1630 /*
1631 * Unlike video, with audio the audio frame size matters.
1632 * Currently we are fully reliant on the lavfi filter chain to
1633 * do the buffering deed for us, and thus the frame size parameter
1634 * needs to be set accordingly. Where does one get the required
1635 * frame size? From the initialized AVCodecContext of an audio
1636 * encoder. Thus, if we have gotten to an audio stream, initialize
1637 * the encoder earlier than receiving the first AVFrame.
1638 */
1639 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
1641
1642 if (!ost->pkt && !(ost->pkt = av_packet_alloc())) {
1643 return AVERROR(ENOMEM);
1644 }
1645 if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1646 return AVERROR(ENOMEM);
1647 }
1648 filtered_frame = ost->filtered_frame;
1649
1650 while (1) {
1651 ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1652 AV_BUFFERSINK_FLAG_NO_REQUEST);
1653 if (ret < 0) {
1654 if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1655 av_log(NULL, AV_LOG_WARNING,
1656 "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1657 } else if (flush && ret == AVERROR_EOF) {
1658 if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
1659 do_video_out(of, ost, NULL);
1660 }
1661 break;
1662 }
1663 if (ost->finished) {
1664 av_frame_unref(filtered_frame);
1665 continue;
1666 }
1667
1668 switch (av_buffersink_get_type(filter)) {
1669 case AVMEDIA_TYPE_VIDEO:
1670 if (!ost->frame_aspect_ratio.num)
1671 enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1672
1673 do_video_out(of, ost, filtered_frame);
1674 break;
1675 case AVMEDIA_TYPE_AUDIO:
1676 if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1677 enc->channels != filtered_frame->channels) {
1678 av_log(NULL, AV_LOG_ERROR,
1679 "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1680 break;
1681 }
1682 do_audio_out(of, ost, filtered_frame);
1683 break;
1684 default:
1685 // TODO support subtitle filters
1686 av_assert0(0);
1687 }
1688
1689 av_frame_unref(filtered_frame);
1690 }
1691 }
1692
1693 return 0;
1694}
1695
1696static void print_final_stats(int64_t total_size)
1697{
1698 uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1699 uint64_t subtitle_size = 0;
1700 uint64_t data_size = 0;
1701 float percent = -1.0;
1702 int i, j;
1703 int pass1_used = 1;
1704
1705 for (i = 0; i < nb_output_streams; i++) {
1707 switch (ost->enc_ctx->codec_type) {
1708 case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1709 case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1710 case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1711 default: other_size += ost->data_size; break;
1712 }
1713 extra_size += ost->enc_ctx->extradata_size;
1715 if ( (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
1716 != AV_CODEC_FLAG_PASS1)
1717 pass1_used = 0;
1718 }
1719
1720 if (data_size && total_size>0 && total_size >= data_size)
1721 percent = 100.0 * (total_size - data_size) / data_size;
1722
1723 av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1724 video_size / 1024.0,
1725 audio_size / 1024.0,
1726 subtitle_size / 1024.0,
1727 other_size / 1024.0,
1728 extra_size / 1024.0);
1729 if (percent >= 0.0)
1730 av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1731 else
1732 av_log(NULL, AV_LOG_INFO, "unknown");
1733 av_log(NULL, AV_LOG_INFO, "\n");
1734
1735 /* print verbose per-stream stats */
1736 for (i = 0; i < nb_input_files; i++) {
1737 InputFile *f = input_files[i];
1738 uint64_t total_packets = 0, total_size = 0;
1739
1740 av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1741 i, f->ctx->url);
1742
1743 for (j = 0; j < f->nb_streams; j++) {
1745 enum AVMediaType type = ist->dec_ctx->codec_type;
1746
1747 total_size += ist->data_size;
1748 total_packets += ist->nb_packets;
1749
1750 av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1751 i, j, media_type_string(type));
1752 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1753 ist->nb_packets, ist->data_size);
1754
1755 if (ist->decoding_needed) {
1756 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1757 ist->frames_decoded);
1758 if (type == AVMEDIA_TYPE_AUDIO)
1759 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1760 av_log(NULL, AV_LOG_VERBOSE, "; ");
1761 }
1762
1763 av_log(NULL, AV_LOG_VERBOSE, "\n");
1764 }
1765
1766 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1767 total_packets, total_size);
1768 }
1769
1770 for (i = 0; i < nb_output_files; i++) {
1771 OutputFile *of = output_files[i];
1772 uint64_t total_packets = 0, total_size = 0;
1773
1774 av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1775 i, of->ctx->url);
1776
1777 for (j = 0; j < of->ctx->nb_streams; j++) {
1779 enum AVMediaType type = ost->enc_ctx->codec_type;
1780
1781 total_size += ost->data_size;
1782 total_packets += ost->packets_written;
1783
1784 av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1785 i, j, media_type_string(type));
1786 if (ost->encoding_needed) {
1787 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1789 if (type == AVMEDIA_TYPE_AUDIO)
1790 av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1791 av_log(NULL, AV_LOG_VERBOSE, "; ");
1792 }
1793
1794 av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1796
1797 av_log(NULL, AV_LOG_VERBOSE, "\n");
1798 }
1799
1800 av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1801 total_packets, total_size);
1802 }
1803 if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1804 av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1805 if (pass1_used) {
1806 av_log(NULL, AV_LOG_WARNING, "\n");
1807 } else {
1808 av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1809 }
1810 }
1811}
1812
1813static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1814{
1815 AVFormatContext *oc = NULL;
1816 AVCodecContext *enc = NULL;
1817 OutputStream *ost = NULL;
1818 int64_t pts = INT64_MIN + 1;
1819 int vid, i;
1820
1821 int frame_number = 0;
1822 float fps = 0;
1823 float quality = 0;
1824 int64_t total_size = 0;
1825 int seconds = 0;
1826 double bitrate = 0.0;
1827 double speed = 0.0;
1828
1829 float t = (cur_time-timer_start) / 1000000.0;
1830
1831 oc = output_files[0]->ctx;
1832
1833 // 1. calculate size
1834 total_size = avio_size(oc->pb);
1835 if (total_size <= 0) {
1836 total_size = avio_tell(oc->pb);
1837 }
1838
1839 vid = 0;
1840 for (i = 0; i < nb_output_streams; i++) {
1841 ost = output_streams[i];
1842 enc = ost->enc_ctx;
1843
1844 if (!ost->stream_copy) {
1845
1846 // 2. extract quality
1847 quality = ost->quality / (float) FF_QP2LAMBDA;
1848 }
1849
1850 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1851
1852 // 3. extract frame number
1853 frame_number = ost->frame_number;
1854
1855 // 4. calculate fps
1856 fps = t > 1 ? frame_number / t : 0;
1857 }
1858
1859 // 5. calculate time
1860 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE)
1861 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1862 ost->st->time_base, AV_TIME_BASE_Q));
1863
1864 vid = 1;
1865 }
1866
1867 // 6. calculate time, with microseconds to milliseconds conversion
1868 seconds = FFABS(pts) / 1000;
1869
1870 // 7. calculating kbit/s value
1871 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1872
1873 // 9. calculate processing speed = processed stream duration/operation duration
1874 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1875
1876 // FORWARD DATA
1877 if (report_callback != NULL) {
1878 report_callback(frame_number, fps, quality, total_size, seconds, bitrate, speed);
1879 }
1880}
1881
1882static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1883{
1884 AVBPrint buf, buf_script;
1886 AVFormatContext *oc;
1887 int64_t total_size;
1888 AVCodecContext *enc;
1889 int frame_number, vid, i;
1890 double bitrate;
1891 double speed;
1892 int64_t pts = INT64_MIN + 1;
1893 int hours, mins, secs, us;
1894 const char *hours_sign;
1895 int ret;
1896 float t;
1897
1898 if (!is_last_report) {
1899 if (last_time == -1) {
1900 last_time = cur_time;
1901 }
1902 if (((cur_time - last_time) < stats_period && !first_report) ||
1904 return;
1905 last_time = cur_time;
1906 }
1907
1908 forward_report(is_last_report, timer_start, cur_time);
1909
1910 if (!print_stats && !is_last_report && !progress_avio)
1911 return;
1912
1913 t = (cur_time-timer_start) / 1000000.0;
1914
1915
1916 oc = output_files[0]->ctx;
1917
1918 total_size = avio_size(oc->pb);
1919 if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1920 total_size = avio_tell(oc->pb);
1921
1922 vid = 0;
1923 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
1924 av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1925 for (i = 0; i < nb_output_streams; i++) {
1926 float q = -1;
1927 ost = output_streams[i];
1928 enc = ost->enc_ctx;
1929 if (!ost->stream_copy)
1930 q = ost->quality / (float) FF_QP2LAMBDA;
1931
1932 if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1933 av_bprintf(&buf, "q=%2.1f ", q);
1934 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1935 ost->file_index, ost->index, q);
1936 }
1937 if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1938 float fps;
1939
1940 frame_number = ost->frame_number;
1941 fps = t > 1 ? frame_number / t : 0;
1942 av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1943 frame_number, fps < 9.95, fps, q);
1944 av_bprintf(&buf_script, "frame=%d\n", frame_number);
1945 av_bprintf(&buf_script, "fps=%.2f\n", fps);
1946 av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1947 ost->file_index, ost->index, q);
1948 if (is_last_report)
1949 av_bprintf(&buf, "L");
1950 if (qp_hist) {
1951 int j;
1952 int qp = lrintf(q);
1953 if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1954 qp_histogram[qp]++;
1955 for (j = 0; j < 32; j++)
1956 av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1957 }
1958
1959 if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1960 int j;
1961 double error, error_sum = 0;
1962 double scale, scale_sum = 0;
1963 double p;
1964 char type[3] = { 'Y','U','V' };
1965 av_bprintf(&buf, "PSNR=");
1966 for (j = 0; j < 3; j++) {
1967 if (is_last_report) {
1968 error = enc->error[j];
1969 scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1970 } else {
1971 error = ost->error[j];
1972 scale = enc->width * enc->height * 255.0 * 255.0;
1973 }
1974 if (j)
1975 scale /= 4;
1976 error_sum += error;
1977 scale_sum += scale;
1978 p = psnr(error / scale);
1979 av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1980 av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1981 ost->file_index, ost->index, type[j] | 32, p);
1982 }
1983 p = psnr(error_sum / scale_sum);
1984 av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1985 av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1986 ost->file_index, ost->index, p);
1987 }
1988 vid = 1;
1989 }
1990 /* compute min output value */
1991 if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
1992 pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1993 ost->st->time_base, AV_TIME_BASE_Q));
1994 if (copy_ts) {
1995 if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
1997 if (copy_ts_first_pts != AV_NOPTS_VALUE)
1999 }
2000 }
2001
2002 if (is_last_report)
2004 }
2005
2006 secs = FFABS(pts) / AV_TIME_BASE;
2007 us = FFABS(pts) % AV_TIME_BASE;
2008 mins = secs / 60;
2009 secs %= 60;
2010 hours = mins / 60;
2011 mins %= 60;
2012 hours_sign = (pts < 0) ? "-" : "";
2013
2014 bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
2015 speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
2016
2017 if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
2018 else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
2019 if (pts == AV_NOPTS_VALUE) {
2020 av_bprintf(&buf, "N/A ");
2021 } else {
2022 av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
2023 hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
2024 }
2025
2026 if (bitrate < 0) {
2027 av_bprintf(&buf, "bitrate=N/A");
2028 av_bprintf(&buf_script, "bitrate=N/A\n");
2029 }else{
2030 av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
2031 av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
2032 }
2033
2034 if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
2035 else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
2036 if (pts == AV_NOPTS_VALUE) {
2037 av_bprintf(&buf_script, "out_time_us=N/A\n");
2038 av_bprintf(&buf_script, "out_time_ms=N/A\n");
2039 av_bprintf(&buf_script, "out_time=N/A\n");
2040 } else {
2041 av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
2042 av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
2043 av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
2044 hours_sign, hours, mins, secs, us);
2045 }
2046
2048 av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
2049 av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
2050 av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
2051
2052 if (speed < 0) {
2053 av_bprintf(&buf, " speed=N/A");
2054 av_bprintf(&buf_script, "speed=N/A\n");
2055 } else {
2056 av_bprintf(&buf, " speed=%4.3gx", speed);
2057 av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
2058 }
2059
2060 if (print_stats || is_last_report) {
2061 const char end = is_last_report ? '\n' : '\r';
2062 if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
2063 fprintf(stderr, "%s %c", buf.str, end);
2064 } else
2065 av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
2066
2067 fflush(stderr);
2068 }
2069 av_bprint_finalize(&buf, NULL);
2070
2071 if (progress_avio) {
2072 av_bprintf(&buf_script, "progress=%s\n",
2073 is_last_report ? "end" : "continue");
2074 avio_write(progress_avio, buf_script.str,
2075 FFMIN(buf_script.len, buf_script.size - 1));
2076 avio_flush(progress_avio);
2077 av_bprint_finalize(&buf_script, NULL);
2078 if (is_last_report) {
2079 if ((ret = avio_closep(&progress_avio)) < 0)
2080 av_log(NULL, AV_LOG_ERROR,
2081 "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
2082 }
2083 }
2084
2085 first_report = 0;
2086
2087 if (is_last_report)
2088 print_final_stats(total_size);
2089}
2090
2091static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
2092{
2093 // We never got any input. Set a fake format, which will
2094 // come from libavformat.
2095 ifilter->format = par->format;
2096 ifilter->sample_rate = par->sample_rate;
2097 ifilter->channels = par->channels;
2098 ifilter->channel_layout = par->channel_layout;
2099 ifilter->width = par->width;
2100 ifilter->height = par->height;
2101 ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
2102}
2103
/*
 * Drain all used encoders at end of input: put each encoder into draining
 * mode (avcodec_send_frame(enc, NULL)) and mux every packet it still has
 * buffered, until it returns AVERROR_EOF.
 *
 * NOTE(review): this view is doc-extracted and several hyperlinked source
 * lines are missing (declarations of 'ost'/'of', a filtergraph-configure
 * call, etc.); the gaps are marked inline below — consult upstream
 * fftools/ffmpeg.c before editing.
 */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        /* NOTE(review): line missing here in this view — presumably the
         * declaration of 'ost' as output_streams[i]; confirm upstream. */
        AVCodecContext *enc = ost->enc_ctx;
        /* NOTE(review): another line missing here (likely the OutputFile
         * 'of' used by output_packet() below) — confirm upstream. */

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;

            av_log(NULL, AV_LOG_WARNING,
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                /* Seed any still-unknown filter inputs from the demuxer's
                 * codec parameters so the graph can be configured at all. */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0)
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
                }

                /* NOTE(review): the condition guarding this 'continue' is
                 * missing in this view — confirm upstream. */
                    continue;

                /* NOTE(review): the call that sets 'ret' (filtergraph
                 * configuration) is missing in this view — confirm. */
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                /* NOTE(review): line missing here in this view. */
            }

            /* NOTE(review): line missing here in this view. */
        }

        if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
            continue;

        for (;;) {
            const char *desc = NULL;
            AVPacket *pkt = ost->pkt;
            int pkt_size;

            /* Human-readable media type for log messages only. */
            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc = "video";
                break;
            default:
                av_assert0(0);
            }

            update_benchmark(NULL);

            av_packet_unref(pkt);
            /* EAGAIN here means the encoder wants input; feeding NULL puts it
             * into draining mode so subsequent receives yield buffered packets. */
            while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
            }

            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                       desc,
                       av_err2str(ret));
                exit_program(1);
            }
            /* Append encoder pass-1 statistics to the 2-pass logfile, if any. */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                /* Encoder fully drained: signal EOF downstream and stop. */
                output_packet(of, pkt, ost, 1);
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
                /* Muxer no longer accepts packets; discard and keep draining. */
                av_packet_unref(pkt);
                continue;
            }
            av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt->size;
            output_packet(of, pkt, ost, 0);
            if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                do_video_stats(ost, pkt_size);
            }
        }
    }
}
2207
/*
 * Check whether a packet from ist should be written into ost at this time
 */
/* NOTE(review): the function signature line is missing from this
 * doc-extracted view (upstream this is
 * check_output_constraints(InputStream *ist, OutputStream *ost) — confirm). */
{
    /* NOTE(review): a line is missing here — presumably the declaration of
     * 'of' (the OutputFile owning ost); confirm upstream. */
    int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;

    /* The output stream must be fed by exactly this input stream. */
    if (ost->source_index != ist_index)
        return 0;

    if (ost->finished)
        return 0;

    /* Drop packets timestamped before the output file's -ss start time. */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}
2227
/*
 * Copy one input packet to an output stream without re-encoding:
 * apply start/recording-time trimming, then rescale timestamps into the
 * muxer time base and hand the packet to output_packet().
 *
 * NOTE(review): several hyperlinked lines are missing from this
 * doc-extracted view (marked inline) — consult upstream fftools/ffmpeg.c.
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    /* NOTE(review): line missing here — presumably the declaration of 'of'
     * (output_files[ost->file_index]); confirm upstream. */
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket *opkt = ost->pkt;

    av_packet_unref(opkt);
    // EOF: flush output bitstream filters.
    if (!pkt) {
        output_packet(of, opkt, ost, 1);
        return;
    }

    /* Drop leading non-keyframe packets. */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        /* NOTE(review): the continuation of this condition is missing in
         * this view (upstream it checks ost->copy_initial_nonkeyframes). */
        return;

    /* Drop packets before the effective start time unless -copypriorss. */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* Stop when the output file's -t recording time is reached. */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        /* NOTE(review): line missing here (likely closes the output
         * stream) — confirm upstream. */
        return;
    }

    /* Likewise for the input file's own recording-time limit. */
    if (f->recording_time != INT64_MAX) {
        start_time = 0;
        if (copy_ts) {
            start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
            start_time += start_at_zero ? 0 : f->ctx->start_time;
        }
        if (ist->pts >= f->recording_time + start_time) {
            /* NOTE(review): line missing here — confirm upstream. */
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (av_packet_ref(opkt, pkt) < 0)
        exit_program(1);

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;

    if (pkt->dts == AV_NOPTS_VALUE) {
        opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                     (AVRational){1, ist->dec_ctx->sample_rate}, duration,
                                     &ist->filter_in_rescale_delta_last, ost->mux_timebase);
        /* dts will be set immediately afterwards to what pts is now */
        opkt->pts = opkt->dts - ost_tb_start_time;
    } else
        opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt->dts -= ost_tb_start_time;

    opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    output_packet(of, opkt, ost, 0);
}
2304
/* NOTE(review): the signature line is missing from this doc-extracted view
 * (upstream this is guess_input_channel_layout(InputStream *ist), returning
 * int — confirm). Fills in a default channel layout when the decoder did
 * not report one; returns 0 when no guess could be made. */
{
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        /* Respect the user-set -guess_layout_max channel-count limit. */
        if (dec->channels > ist->guess_layout_max)
            return 0;
        dec->channel_layout = av_get_default_channel_layout(dec->channels);
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
2324
/* NOTE(review): the signature line is missing from this doc-extracted view
 * (upstream: check_decode_result(InputStream *ist, int *got_output, int ret)
 * — confirm). Updates decode success/failure statistics and aborts on
 * decode errors or corrupt frames when -xerror is set. */
{
    /* Count one success or one failure per decode attempt that produced
     * output or errored. */
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (*got_output && ist) {
        /* A frame was produced but flagged as damaged by the decoder. */
        if (ist->decoded_frame->decode_error_flags || (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
            av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
                   "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
            if (exit_on_error)
                exit_program(1);
        }
    }
}
2342
// Filters can be configured only if the formats of all inputs are known.
/* NOTE(review): the signature line is missing from this doc-extracted view
 * (upstream: ifilter_has_all_input_formats(FilterGraph *fg) — confirm).
 * Returns 0 while any audio/video input still has an unknown format. */
{
    int i;
    for (i = 0; i < fg->nb_inputs; i++) {
        if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
            return 0;
    }
    return 1;
}
2354
/*
 * Push a decoded frame into one filtergraph input. If the frame's
 * parameters differ from what the graph was configured with, the graph is
 * reinitialized; frames that arrive before the graph can be configured are
 * buffered in the input's FIFO.
 *
 * NOTE(review): two hyperlinked lines are missing from this doc-extracted
 * view (marked inline) — consult upstream fftools/ffmpeg.c before editing.
 */
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
{
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret, i;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
                       ifilter->channels       != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width  != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    /* Honour -reinit_filter 0: keep the existing graph despite changes. */
    if (!ifilter->ist->reinit_filters && fg->graph)
        need_reinit = 0;

    /* A change in hw frames context always forces reinitialization. */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        for (i = 0; i < fg->nb_inputs; i++) {
            /* NOTE(review): the condition opening this block is missing in
             * this view (upstream it checks whether all input formats are
             * known yet — confirm). */
            AVFrame *tmp = av_frame_clone(frame);
            if (!tmp)
                return AVERROR(ENOMEM);
            av_frame_unref(frame);

            /* Grow the FIFO when full before queueing the cloned frame. */
            if (!av_fifo_space(ifilter->frame_queue)) {
                ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                if (ret < 0) {
                    av_frame_free(&tmp);
                    return ret;
                }
            }
            av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
            return 0;
        }
    }

    /* Flush pending output of the old graph before tearing it down. */
    ret = reap_filters(1);
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    /* NOTE(review): the call that sets 'ret' is missing in this view
     * (upstream: configure_filtergraph(fg) — confirm). */
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
        return ret;
    }
    }

    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2431
/*
 * Signal EOF on one filtergraph input. If the graph was never configured,
 * try to fill in the input parameters from the demuxer so configuration can
 * still happen; fail if the format remains unknown.
 */
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
{
    int ret = 0;

    ifilter->eof = 1;

    if (ifilter->filter) {

        /* THIS VALIDATION IS REQUIRED TO COMPLETE CANCELLATION */
        /* NOTE(review): the guarding condition is missing from this
         * doc-extracted view; per the changelog at the top of this file it
         * is a !received_sigterm check added by FFmpegKit — confirm. */
            ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
        }
        if (ret < 0)
            return ret;
    } else {
        // the filtergraph was never configured
        if (ifilter->format < 0)
            ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
        if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
            av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
            return AVERROR_INVALIDDATA;
        }
    }

    return 0;
}
2458
2459// This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2460// There is the following difference: if you got a frame, you must call
2461// it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2462// (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2463static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2464{
2465 int ret;
2466
2467 *got_frame = 0;
2468
2469 if (pkt) {
2470 ret = avcodec_send_packet(avctx, pkt);
2471 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2472 // decoded frames with avcodec_receive_frame() until done.
2473 if (ret < 0 && ret != AVERROR_EOF)
2474 return ret;
2475 }
2476
2477 ret = avcodec_receive_frame(avctx, frame);
2478 if (ret < 0 && ret != AVERROR(EAGAIN))
2479 return ret;
2480 if (ret >= 0)
2481 *got_frame = 1;
2482
2483 return 0;
2484}
2485
/* NOTE(review): the signature line is missing from this doc-extracted view
 * (upstream: send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
 * — confirm). Fans a decoded frame out to every filter fed by this input
 * stream; the last filter consumes the frame itself, earlier ones get refs. */
{
    int i, ret;
    AVFrame *f;

    av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            /* Not the last consumer: hand out a reference, keep the original. */
            f = ist->filter_frame;
            ret = av_frame_ref(f, decoded_frame);
            if (ret < 0)
                break;
        } else
            f = decoded_frame;
        ret = ifilter_send_frame(ist->filters[i], f);
        if (ret == AVERROR_EOF)
            ret = 0; /* ignore */
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
            break;
        }
    }
    return ret;
}
2511
/*
 * Decode one audio packet (or drain with pkt == NULL), derive a pts for the
 * decoded frame, advance the stream's next_pts/next_dts estimates and pass
 * the frame to the filters.
 *
 * NOTE(review): three hyperlinked lines are missing from this doc-extracted
 * view (marked inline) — consult upstream fftools/ffmpeg.c before editing.
 */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    int ret, err = 0;
    AVRational decoded_frame_tb;

    /* Lazily allocate the per-stream scratch frames. */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    update_benchmark(NULL);
    /* NOTE(review): the decode call that sets 'ret' and *got_output is
     * missing in this view (upstream: decode(avctx, decoded_frame,
     * got_output, pkt) — confirm). */
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    if (ret != AVERROR_EOF)
        /* NOTE(review): the statement controlled by this 'if' is missing in
         * this view (upstream it records decode statistics — confirm). */

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;

    /* Pick the best available pts source: frame pts, then packet pts,
     * then the stream dts estimate. */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb = AV_TIME_BASE_Q;
    }
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    /* NOTE(review): the call that sets 'err' (sending the frame to the
     * filters) is missing in this view — confirm upstream. */

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2573
/*
 * Decode one video packet (or drain with eof set), recover a best-effort
 * timestamp for the frame, optionally download it from a hwaccel, and pass
 * it on to the filters.
 *
 * NOTE(review): two hyperlinked lines are missing from this doc-extracted
 * view (marked inline) — consult upstream fftools/ffmpeg.c before editing.
 */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    int i, ret = 0, err = 0;
    int64_t best_effort_timestamp;
    int64_t dts = AV_NOPTS_VALUE;

    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
    // skip the packet.
    if (!eof && pkt && pkt->size == 0)
        return 0;

    /* Lazily allocate the per-stream scratch frames. */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    if (ist->dts != AV_NOPTS_VALUE)
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt) {
        pkt->dts = dts; // ffmpeg.c probably shouldn't do this
    }

    // The old code used to set dts on the drain packet, which does not work
    // with the new API anymore.
    if (eof) {
        /* Queue the dts so drained frames can still get a timestamp below. */
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
        if (!new)
            return AVERROR(ENOMEM);
        ist->dts_buffer = new;
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
    }

    update_benchmark(NULL);
    ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
        } else
            av_log(ist->dec_ctx, AV_LOG_WARNING,
                   "video_delay is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to https://streams.videolan.org/upload/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
                   ist->dec_ctx->has_b_frames,
                   ist->st->codecpar->video_delay);
    }

    if (ret != AVERROR_EOF)
        /* NOTE(review): the statement controlled by this 'if' is missing in
         * this view (upstream it records decode statistics — confirm). */

    if (*got_output && ret >= 0) {
        /* Diagnostic only: decoder context and frame disagree on geometry. */
        if (ist->dec_ctx->width  != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                   decoded_frame->width,
                   decoded_frame->height,
                   decoded_frame->format,
                   ist->dec_ctx->width,
                   ist->dec_ctx->height,
                   ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0)
        return ret;

    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    /* Download the frame from hwaccel memory when a retrieval hook is set. */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp= decoded_frame->best_effort_timestamp;
    *duration_pts = decoded_frame->pkt_duration;

    /* With -r on input, synthesize CFR timestamps instead. */
    if (ist->framerate.num)
        best_effort_timestamp = ist->cfr_next_pts++;

    /* While draining, fall back to the dts values queued above. */
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
        best_effort_timestamp = ist->dts_buffer[0];

        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
        ist->nb_dts_buffer--;
    }

    if(best_effort_timestamp != AV_NOPTS_VALUE) {
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        if (ts != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = ts;
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    /* NOTE(review): the call that sets 'err' (sending the frame to the
     * filters) is missing in this view — confirm upstream. */

fail:
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2704
/*
 * Decode one subtitle packet, optionally fix overlapping display durations
 * (-fix_sub_duration), feed sub2video when active, and forward the subtitle
 * to every subtitle-encoding output stream fed by this input.
 *
 * NOTE(review): several hyperlinked lines are missing from this
 * doc-extracted view (marked inline) — consult upstream fftools/ffmpeg.c.
 */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
                               int *decode_failed)
{
    AVSubtitle subtitle;
    int free_sub = 1;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
    /* NOTE(review): the remaining arguments of this call are missing in
     * this view (upstream: &subtitle, got_output, pkt — confirm). */

    /* NOTE(review): a statement is missing here (upstream it records decode
     * statistics — confirm). */

    if (ret < 0 || !*got_output) {
        *decode_failed = 1;
        if (!pkt->size)
            /* NOTE(review): the statement controlled by this 'if' is
             * missing in this view — confirm upstream. */
        return ret;
    }

    if (ist->fix_sub_duration) {
        int end = 1;
        if (ist->prev_sub.got_output) {
            /* Clip the previous subtitle so it ends when this one starts. */
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %"PRId32" to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
            }
        }
        /* Delay output by one subtitle: emit the previous one now. */
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
        FFSWAP(int,        ret,         ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    if (ist->sub2video.frame) {
        sub2video_update(ist, INT64_MIN, &subtitle);
    } else if (ist->nb_filters) {
        /* No sub2video frame yet: queue the subtitle for later. */
        if (!ist->sub2video.sub_queue)
            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
        if (!ist->sub2video.sub_queue)
            exit_program(1);
        if (!av_fifo_space(ist->sub2video.sub_queue)) {
            ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
            if (ret < 0)
                exit_program(1);
        }
        av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
        free_sub = 0;
    }

    if (!subtitle.num_rects)
        goto out;

    ist->frames_decoded++;

    for (i = 0; i < nb_output_streams; i++) {
        /* NOTE(review): a line is missing here — presumably the declaration
         * of 'ost' as output_streams[i]; confirm upstream. */

        if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
            exit_program(1);
        /* NOTE(review): the first part of this condition is missing in this
         * view (upstream it checks output constraints/encoding_needed). */
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        /* NOTE(review): the subtitle-output call is missing in this view —
         * confirm upstream. */
    }

out:
    if (free_sub)
        avsubtitle_free(&subtitle);
    return ret;
}
2783
/* NOTE(review): the signature line is missing from this doc-extracted view
 * (upstream: send_filter_eof(InputStream *ist) — confirm). Sends EOF, at
 * the stream's current pts, to every filter input fed by this stream. */
{
    int i, ret;
    /* TODO keep pts also in stream time base to avoid converting back */
    int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);

    for (i = 0; i < ist->nb_filters; i++) {
        ret = ifilter_send_eof(ist->filters[i], pts);
        if (ret < 0)
            return ret;
    }
    return 0;
}
2798
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Main per-packet input handler: bootstrap the stream's timestamp state,
 * run the decode loop (audio/video/subtitle) while decoding is needed,
 * send filter EOF after draining, advance timestamps for streamcopy, and
 * finally hand the packet to every matching output stream.
 *
 * Returns nonzero while the stream has more data (i.e. !eof_reached).
 *
 * NOTE(review): three hyperlinked lines are missing from the final output
 * loop in this doc-extracted view (marked inline) — consult upstream.
 */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    int ret = 0, i;
    int repeating = 0;
    int eof_reached = 0;

    AVPacket *avpkt;

    if (!ist->pkt && !(ist->pkt = av_packet_alloc()))
        return AVERROR(ENOMEM);
    avpkt = ist->pkt;

    /* First packet for this stream: establish initial dts/pts. */
    if (!ist->saw_first_ts) {
        ist->first_dts =
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->first_dts =
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (pkt) {
        av_packet_unref(avpkt);
        ret = av_packet_ref(avpkt, pkt);
        if (ret < 0)
            return ret;
    }

    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int64_t duration_dts = 0;
        int64_t duration_pts = 0;
        int got_output = 0;
        int decode_failed = 0;

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio (ist, repeating ? NULL : avpkt, &got_output,
                                &decode_failed);
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
                                &decode_failed);
            if (!repeating || !pkt || got_output) {
                /* Estimate the frame duration in dts terms: packet duration
                 * first, then the decoder's framerate. */
                if (pkt && pkt->duration) {
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                    duration_dts = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                }

                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
                    ist->next_dts += duration_dts;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            if (got_output) {
                if (duration_pts > 0) {
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
                } else {
                    ist->next_pts += duration_dts;
                }
            }
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (repeating)
                break;
            ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
            if (!pkt && ret >= 0)
                ret = AVERROR_EOF;
            av_packet_unref(avpkt);
            break;
        default:
            return -1;
        }

        if (ret == AVERROR_EOF) {
            eof_reached = 1;
            break;
        }

        if (ret < 0) {
            if (decode_failed) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
            } else {
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
            }
            if (!decode_failed || exit_on_error)
                exit_program(1);
            break;
        }

        if (got_output)
            ist->got_output = 1;

        if (!got_output)
            break;

        // During draining, we might get multiple output frames in this loop.
        // ffmpeg.c does not drain the filter chain on configuration changes,
        // which means if we send multiple frames at once to the filters, and
        // one of those frames changes configuration, the buffered frames will
        // be lost. This can upset certain FATE tests.
        // Decode only 1 frame per call on EOF to appease these FATE tests.
        // The ideal solution would be to rewrite decoding to use the new
        // decoding API in a better way.
        if (!pkt)
            break;

        repeating = 1;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed && pkt) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            av_assert1(pkt->duration >= 0);
            if (ist->dec_ctx->sample_rate) {
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                                  ist->dec_ctx->sample_rate;
            } else {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    for (i = 0; i < nb_output_streams; i++) {
        /* NOTE(review): a line is missing here — presumably the declaration
         * of 'ost' as output_streams[i]; confirm upstream. */

        if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
            exit_program(1);
        /* NOTE(review): the condition guarding this 'continue' is missing
         * in this view (upstream it checks output constraints). */
            continue;

        /* NOTE(review): the streamcopy call is missing in this view —
         * confirm upstream. */
    }

    return !eof_reached;
}
2991
2992static void print_sdp(void)
2993{
2994 char sdp[16384];
2995 int i;
2996 int j;
2997 AVIOContext *sdp_pb;
2998 AVFormatContext **avc;
2999
3000 for (i = 0; i < nb_output_files; i++) {
3001 if (!output_files[i]->header_written)
3002 return;
3003 }
3004
3005 avc = av_malloc_array(nb_output_files, sizeof(*avc));
3006 if (!avc)
3007 exit_program(1);
3008 for (i = 0, j = 0; i < nb_output_files; i++) {
3009 if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
3010 avc[j] = output_files[i]->ctx;
3011 j++;
3012 }
3013 }
3014
3015 if (!j)
3016 goto fail;
3017
3018 av_sdp_create(avc, j, sdp, sizeof(sdp));
3019
3020 if (!sdp_filename) {
3021 av_log(NULL, AV_LOG_STDERR, "SDP:\n%s\n", sdp);
3022 fflush(stdout);
3023 } else {
3024 if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
3025 av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
3026 } else {
3027 avio_print(sdp_pb, sdp);
3028 avio_closep(&sdp_pb);
3029 av_freep(&sdp_filename);
3030 }
3031 }
3032
3033fail:
3034 av_freep(&avc);
3035}
3036
/*
 * AVCodecContext.get_format callback: walk the decoder's offered pixel
 * formats and pick the first one we can use — initializing the matching
 * hwaccel on the way — or fall through to the first non-hwaccel format.
 *
 * NOTE(review): one hyperlinked line is missing from this doc-extracted
 * view (marked inline) — consult upstream fftools/ffmpeg.c before editing.
 */
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const AVCodecHWConfig  *config = NULL;
        int i;

        /* First software format offered: accept it (loop exits below). */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        if (ist->hwaccel_id == HWACCEL_GENERIC ||
            ist->hwaccel_id == HWACCEL_AUTO) {
            /* Look for a decoder hw config that matches this pixel format
             * and supports the device-context method. */
            for (i = 0;; i++) {
                config = avcodec_get_hw_config(s->codec, i);
                if (!config)
                    break;
                if (!(config->methods &
                      AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
                    continue;
                if (config->pix_fmt == *p)
                    break;
            }
        }
        if (config) {
            if (config->device_type != ist->hwaccel_device_type) {
                // Different hwaccel offered, ignore.
                continue;
            }

            /* NOTE(review): the call that sets 'ret' is missing in this
             * view (upstream it initializes the hwaccel device for this
             * stream — confirm). */
            if (ret < 0) {
                if (ist->hwaccel_id == HWACCEL_GENERIC) {
                    av_log(NULL, AV_LOG_FATAL,
                           "%s hwaccel requested for input stream #%d:%d, "
                           "but cannot be initialized.\n",
                           av_hwdevice_get_type_name(config->device_type),
                           ist->file_index, ist->st->index);
                    return AV_PIX_FMT_NONE;
                }
                continue;
            }
        } else {
            /* Fall back to the static hwaccel table. */
            const HWAccel *hwaccel = NULL;
            int i;
            for (i = 0; hwaccels[i].name; i++) {
                if (hwaccels[i].pix_fmt == *p) {
                    hwaccel = &hwaccels[i];
                    break;
                }
            }
            if (!hwaccel) {
                // No hwaccel supporting this pixfmt.
                continue;
            }
            if (hwaccel->id != ist->hwaccel_id) {
                // Does not match requested hwaccel.
                continue;
            }

            ret = hwaccel->init(s);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL,
                       "%s hwaccel requested for input stream #%d:%d, "
                       "but cannot be initialized.\n", hwaccel->name,
                       ist->file_index, ist->st->index);
                return AV_PIX_FMT_NONE;
            }
        }

        if (ist->hw_frames_ctx) {
            s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
            if (!s->hw_frames_ctx)
                return AV_PIX_FMT_NONE;
        }

        ist->hwaccel_pix_fmt = *p;
        break;
    }

    return *p;
}
3122
3123static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
3124{
3125 InputStream *ist = s->opaque;
3126
3127 if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
3128 return ist->hwaccel_get_buffer(s, frame, flags);
3129
3130 return avcodec_default_get_buffer2(s, frame, flags);
3131}
3132
/**
 * Prepare input stream @p ist_index for use: if the stream needs decoding,
 * configure the decoder context (hwaccel-aware callbacks, decoder options,
 * packet timebase) and open the decoder.  On failure a human-readable
 * message is written into @p error (at most @p error_len bytes) and a
 * negative AVERROR code is returned.
 */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        const AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        /* Route pixel-format negotiation and frame allocation through the
         * hwaccel-aware callbacks defined earlier in this file. */
        ist->dec_ctx->opaque = ist;
        ist->dec_ctx->get_format = get_format;
        ist->dec_ctx->get_buffer2 = get_buffer;
#if LIBAVCODEC_VERSION_MAJOR < 60
FF_DISABLE_DEPRECATION_WARNINGS
        ist->dec_ctx->thread_safe_callbacks = 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ist->decoding_needed & DECODING_FOR_FILTER)
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);

        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        ist->dec_ctx->pkt_timebase = ist->st->time_base;

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
        if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);

        /* NOTE(review): a statement assigning `ret` was lost in extraction
         * here (upstream FFmpeg: "ret = hw_device_setup_for_decode(ist);") —
         * confirm against the FFmpeg sources before relying on this listing. */
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "decoder on input stream #%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        /* All decoder options must have been consumed by avcodec_open2(). */
        assert_avoptions(ist->decoder_opts);
    }

    /* Timestamps are unknown until the first packet is processed. */
    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
3200
3202{
3203 if (ost->source_index >= 0)
3205 return NULL;
3206}
3207
/* qsort() comparator ordering int64_t values ascending; returns -1/0/1. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
3212
/* open the muxer when all the streams are initialized */
/* NOTE(review): extraction dropped the signature (upstream FFmpeg:
 * "static int check_init_output_file(OutputFile *of, int file_index)") and
 * both per-loop declarations of `ost`
 * (upstream: "OutputStream *ost = output_streams[of->ost_index + i];").
 * Confirm against the FFmpeg sources. */
{
    int ret, i;

    /* Defer writing the header until every stream of this file is ready. */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        if (!ost->initialized)
            return 0;
    }

    of->ctx->interrupt_callback = int_cb;

    ret = avformat_write_header(of->ctx, &of->opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Could not write header for output file #%d "
               "(incorrect codec parameters ?): %s\n",
               file_index, av_err2str(ret));
        return ret;
    }
    //assert_avoptions(of->opts);
    of->header_written = 1;

    av_dump_format(of->ctx, file_index, of->ctx->url, 1);
    /* NOTE(review): elided here (upstream): "nb_output_dumped++;" */

    if (sdp_filename || want_sdp)
        print_sdp();

    /* flush the muxing queues */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        /* NOTE(review): elided here (upstream):
         * "OutputStream *ost = output_streams[of->ost_index + i];" */

        /* try to improve muxing time_base (only possible if nothing has been written yet) */
        if (!av_fifo_size(ost->muxing_queue))
            ost->mux_timebase = ost->st->time_base;

        while (av_fifo_size(ost->muxing_queue)) {
            AVPacket *pkt;
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            ost->muxing_queue_data_size -= pkt->size;
            write_packet(of, pkt, ost, 1);
            av_packet_free(&pkt);
        }
    }

    return 0;
}
3262
3264{
3265 AVBSFContext *ctx = ost->bsf_ctx;
3266 int ret;
3267
3268 if (!ctx)
3269 return 0;
3270
3271 ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
3272 if (ret < 0)
3273 return ret;
3274
3275 ctx->time_base_in = ost->st->time_base;
3276
3277 ret = av_bsf_init(ctx);
3278 if (ret < 0) {
3279 av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
3280 ctx->filter->name);
3281 return ret;
3282 }
3283
3284 ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
3285 if (ret < 0)
3286 return ret;
3287
3288 ost->st->time_base = ctx->time_base_out;
3289
3290 return 0;
3291}
3292
3294{
3297 AVCodecParameters *par_dst = ost->st->codecpar;
3298 AVCodecParameters *par_src = ost->ref_par;
3299 AVRational sar;
3300 int i, ret;
3301 uint32_t codec_tag = par_dst->codec_tag;
3302
3303 av_assert0(ist && !ost->filter);
3304
3305 ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
3306 if (ret >= 0)
3307 ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
3308 if (ret < 0) {
3309 av_log(NULL, AV_LOG_FATAL,
3310 "Error setting up codec context options.\n");
3311 return ret;
3312 }
3313
3314 ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
3315 if (ret < 0) {
3316 av_log(NULL, AV_LOG_FATAL,
3317 "Error getting reference codec parameters.\n");
3318 return ret;
3319 }
3320
3321 if (!codec_tag) {
3322 unsigned int codec_tag_tmp;
3323 if (!of->ctx->oformat->codec_tag ||
3324 av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
3325 !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
3326 codec_tag = par_src->codec_tag;
3327 }
3328
3329 ret = avcodec_parameters_copy(par_dst, par_src);
3330 if (ret < 0)
3331 return ret;
3332
3333 par_dst->codec_tag = codec_tag;
3334
3335 if (!ost->frame_rate.num)
3336 ost->frame_rate = ist->framerate;
3337
3338 if (ost->frame_rate.num)
3339 ost->st->avg_frame_rate = ost->frame_rate;
3340 else
3341 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3342
3343 ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
3344 if (ret < 0)
3345 return ret;
3346
3347 // copy timebase while removing common factors
3348 if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
3349 if (ost->frame_rate.num)
3350 ost->st->time_base = av_inv_q(ost->frame_rate);
3351 else
3352 ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
3353 }
3354
3355 // copy estimated duration as a hint to the muxer
3356 if (ost->st->duration <= 0 && ist->st->duration > 0)
3357 ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3358
3359 // copy disposition
3360 ost->st->disposition = ist->st->disposition;
3361
3362 if (ist->st->nb_side_data) {
3363 for (i = 0; i < ist->st->nb_side_data; i++) {
3364 const AVPacketSideData *sd_src = &ist->st->side_data[i];
3365 uint8_t *dst_data;
3366
3367 dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3368 if (!dst_data)
3369 return AVERROR(ENOMEM);
3370 memcpy(dst_data, sd_src->data, sd_src->size);
3371 }
3372 }
3373
3374 if (ost->rotate_overridden) {
3375 uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
3376 sizeof(int32_t) * 9);
3377 if (sd)
3378 av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
3379 }
3380
3381 switch (par_dst->codec_type) {
3382 case AVMEDIA_TYPE_AUDIO:
3383 if (audio_volume != 256) {
3384 av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
3385 exit_program(1);
3386 }
3387 if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
3388 par_dst->block_align= 0;
3389 if(par_dst->codec_id == AV_CODEC_ID_AC3)
3390 par_dst->block_align= 0;
3391 break;
3392 case AVMEDIA_TYPE_VIDEO:
3393 if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
3394 sar =
3395 av_mul_q(ost->frame_aspect_ratio,
3396 (AVRational){ par_dst->height, par_dst->width });
3397 av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
3398 "with stream copy may produce invalid files\n");
3399 }
3400 else if (ist->st->sample_aspect_ratio.num)
3401 sar = ist->st->sample_aspect_ratio;
3402 else
3403 sar = par_src->sample_aspect_ratio;
3404 ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
3405 ost->st->avg_frame_rate = ist->st->avg_frame_rate;
3406 ost->st->r_frame_rate = ist->st->r_frame_rate;
3407 break;
3408 }
3409
3410 ost->mux_timebase = ist->st->time_base;
3411
3412 return 0;
3413}
3414
3416{
3417 AVDictionaryEntry *e;
3418
3419 uint8_t *encoder_string;
3420 int encoder_string_len;
3421 int format_flags = 0;
3422 int codec_flags = ost->enc_ctx->flags;
3423
3424 if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
3425 return;
3426
3427 e = av_dict_get(of->opts, "fflags", NULL, 0);
3428 if (e) {
3429 const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
3430 if (!o)
3431 return;
3432 av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
3433 }
3434 e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
3435 if (e) {
3436 const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
3437 if (!o)
3438 return;
3439 av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
3440 }
3441
3442 encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
3443 encoder_string = av_mallocz(encoder_string_len);
3444 if (!encoder_string)
3445 exit_program(1);
3446
3447 if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
3448 av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
3449 else
3450 av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
3451 av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
3452 av_dict_set(&ost->st->metadata, "encoder", encoder_string,
3453 AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
3454}
3455
/* NOTE(review): extraction dropped the first signature line; upstream FFmpeg:
 * "static void parse_forced_key_frames(char *kf, OutputStream *ost,". */
/* Parse a comma-separated -force_key_frames time spec (mutating @p kf in
 * place) into a sorted array of timestamps in @p avctx's timebase.
 * A "chapters[+offset]" entry expands to one keyframe per chapter start. */
                                    AVCodecContext *avctx)
{
    char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;

    /* Count comma-separated entries to size the array. */
    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }

    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        if (next)
            *next++ = 0;

        if (!memcmp(p, "chapters", 8)) {

            AVFormatContext *avf = output_files[ost->file_index]->ctx;
            int j;

            /* Grow the array by one slot per chapter (minus the spec entry). */
            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                     sizeof(*pts)))) {
                av_log(NULL, AV_LOG_FATAL,
                       "Could not allocate forced key frames array.\n");
                exit_program(1);
            }
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;
            }

        } else {

            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

        }

        p = next;
    }

    av_assert0(index == size);
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
    /* NOTE(review): elided here (upstream): "ost->forced_kf_pts = pts;" —
     * without it the array would leak; confirm against the FFmpeg sources. */
}
3518
/* Choose the encoder timebase: a positive user-supplied -enc_time_base wins;
 * a negative value means "copy from the input stream" (falling back to
 * @p default_time_base with a warning when no input stream exists);
 * otherwise @p default_time_base is used. */
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
{
    /* NOTE(review): elided here (upstream FFmpeg):
     * "InputStream *ist = get_input_stream(ost);" */
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVFormatContext *oc;

    if (ost->enc_timebase.num > 0) {
        enc_ctx->time_base = ost->enc_timebase;
        return;
    }

    if (ost->enc_timebase.num < 0) {
        if (ist) {
            enc_ctx->time_base = ist->st->time_base;
            return;
        }

        /* NOTE(review): elided here (upstream):
         * "oc = output_files[ost->file_index]->ctx;" — `oc` is otherwise
         * used uninitialized below; confirm against the FFmpeg sources. */
        av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
    }

    enc_ctx->time_base = default_time_base;
}
3542
/* Configure the encoder context for an output stream that will be encoded:
 * disposition, frame rate selection, per-media-type parameters (sample/pixel
 * format, dimensions, timebase, color properties, field order) and forced
 * keyframe handling.  @p frame, when non-NULL, supplies color/interlacing
 * properties from the first decoded frame.  Returns 0 or a negative AVERROR. */
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
{
    /* NOTE(review): elided here (upstream FFmpeg):
     * "InputStream *ist = get_input_stream(ost);" */
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVCodecContext *dec_ctx = NULL;
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    /* NOTE(review): elided here (upstream):
     * "set_encoder_id(output_files[ost->file_index], ost);" */

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        ost->st->disposition = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* No input stream: mark the sole audio/video stream of this type
         * as the default one. */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
                ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
                ost->st->disposition = AV_DISPOSITION_DEFAULT;
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Frame rate priority: -r option, filter sink, input -framerate,
         * input r_frame_rate, then a 25 fps fallback. */
        if (!ost->frame_rate.num)
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            av_log(NULL, AV_LOG_WARNING,
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                   "if you want a different framerate.\n",
                   ost->file_index, ost->index);
        }

        if (ost->max_frame_rate.num &&
            (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
             !ost->frame_rate.den))
            /* NOTE(review): elided here (upstream):
             * "ost->frame_rate = ost->max_frame_rate;" — this was the body
             * of the if above; confirm against the FFmpeg sources. */

        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
        enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
        enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
        enc_ctx->channels = av_buffersink_get_channels(ost->filter->filter);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        /* NOTE(review): elided here (upstream):
         * "init_encoder_time_base(ost, av_inv_q(ost->frame_rate));" —
         * confirm against the FFmpeg sources. */

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
           && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }

        enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        /* Propagate color and chroma properties from the first frame. */
        if (frame) {
            enc_ctx->color_range            = frame->color_range;
            enc_ctx->color_primaries       = frame->color_primaries;
            enc_ctx->color_trc              = frame->color_trc;
            enc_ctx->colorspace            = frame->colorspace;
            enc_ctx->chroma_sample_location = frame->chroma_location;
        }

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        if (!dec_ctx ||
            enc_ctx->width   != dec_ctx->width  ||
            enc_ctx->height  != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
            enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
        }

        // Field order: autodetection
        if (frame) {
            if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                frame->top_field_first = !!ost->top_field_first;

            if (frame->interlaced_frame) {
                if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                else
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            } else
                enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
        }

        // Field order: override
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                    forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                /* NOTE(review): four initializations of
                 * ost->forced_keyframes_expr_const_values[...] were elided
                 * here by extraction — confirm against the FFmpeg sources. */

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                /* NOTE(review): elided here (upstream):
                 * "parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);" */
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        if (!enc_ctx->width) {
            enc_ctx->width  = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3725
/* Fully initialize one output stream: open the encoder (when encoding) or
 * copy input parameters (when stream-copying), apply user dispositions,
 * initialize bitstream filters, and finally try to write the output file
 * header once all of its streams are ready.  On error a message is written
 * to @p error (truncated to @p error_len) and a negative AVERROR returned. */
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        const AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        /* NOTE(review): elided here (upstream FFmpeg):
         * "InputStream *ist;" */

        /* NOTE(review): elided here (upstream):
         * "ret = init_output_stream_encode(ost, frame);" */
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* Default audio bitrate of 128k when the encoder declares no defaults. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        /* NOTE(review): elided here (upstream):
         * "ret = hw_device_setup_for_encode(ost);" */
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
            return ret;
        }

        /* Subtitle conversion is only supported text->text or bitmap->bitmap. */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
                avcodec_descriptor_get(dec->codec_id);
            AVCodecDescriptor const *output_descriptor =
                avcodec_descriptor_get(ost->enc_ctx->codec_id);
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }

        /* Copy side data produced by the encoder to the stream. */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((uint32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
    } else if (ost->stream_copy) {
        /* NOTE(review): elided here (upstream):
         * "ret = init_output_stream_streamcopy(ost);" */
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        static const AVOption opts[] = {
            { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, (double)INT64_MAX, .unit = "flags" },
            { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
            { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
            { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
            { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
            { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
            { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
            { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
            { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
            { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
            { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
            { "attached_pic"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC      },    .unit = "flags" },
            { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
            { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
            { "dependent"           , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT         },    .unit = "flags" },
            { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name  = av_default_item_name,
            .option     = opts,
            .version    = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* NOTE(review): elided here (upstream):
     * "ret = check_init_output_file(output_files[ost->file_index], ost->file_index);" */
    if (ret < 0)
        return ret;

    return ret;
}
3907
3908static void report_new_stream(int input_index, AVPacket *pkt)
3909{
3910 InputFile *file = input_files[input_index];
3911 AVStream *st = file->ctx->streams[pkt->stream_index];
3912
3913 if (pkt->stream_index < file->nb_streams_warn)
3914 return;
3915 av_log(file->ctx, AV_LOG_WARNING,
3916 "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
3917 av_get_media_type_string(st->codecpar->codec_type),
3918 input_index, pkt->stream_index,
3919 pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3920 file->nb_streams_warn = pkt->stream_index + 1;
3921}
3922
/* One-time setup before the transcode loop: resolve filtergraph output
 * source indices, arm framerate emulation, open all decoders, initialize
 * streamcopy/subtitle/data output streams, discard unused programs, write
 * headers for stream-less outputs, and dump the stream mapping.  Returns 0
 * on success or a negative AVERROR (after logging the stored error text). */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    /* NOTE(review): elided here (upstream FFmpeg):
     * "OutputStream *ost;" and "InputStream *ist;" */
    char error[1024] = {0};

    /* Attach each simple-filtergraph output to its single input stream. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->readrate || ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* On decoder failure, close any already-opened encoders. */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /*
     * initialize stream copy and subtitle/data streams.
     * Encoded AVFrame based streams will get initialized as follows:
     * - when the first AVFrame is received in do_video_out
     * - just before the first AVFrame is received in either transcode_step
     *   or reap_filters due to us requiring the filter chain buffer sink
     *   to be configured with the correct audio frame size, which is only
     *   known after the encoder is initialized.
     */
    for (i = 0; i < nb_output_streams; i++) {
        if (!output_streams[i]->stream_copy &&
            (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
             output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
            continue;

        ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard  = AVDISCARD_ALL;

            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            /* NOTE(review): elided here (upstream):
             * "ret = check_init_output_file(output_files[i], i);" */
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
            /* NOTE(review): elided arguments here (upstream):
             * "ost->attachment_filename, ost->file_index, ost->index);" */
            continue;
        }

        /* NOTE(review): elided here (upstream):
         * "if (ost->filter && ost->filter->graph->graph_desc) {" */
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
        /* NOTE(review): elided argument here (upstream):
         * "input_streams[ost->source_index]->file_index," */
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        /* NOTE(review): elided here (upstream):
         * "if (ost->sync_ist != input_streams[ost->source_index])" */
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
            /* NOTE(review): elided argument here (upstream):
             * "ost->sync_ist->file_index," */
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec   = ost->enc;
            const char *decoder_name   = "?";
            const char *in_codec_name  = "?";
            const char *encoder_name   = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name  = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name   = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    atomic_store(&transcode_init_done, 1);

    return 0;
}
4103
4104/* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
4105static int need_output(void)
4106{
4107 int i;
4108
4109 for (i = 0; i < nb_output_streams; i++) {
4112 AVFormatContext *os = output_files[ost->file_index]->ctx;
4113
4114 if (ost->finished ||
4115 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
4116 continue;
4117 if (ost->frame_number >= ost->max_frames) {
4118 int j;
4119 for (j = 0; j < of->ctx->nb_streams; j++)
4121 continue;
4122 }
4123
4124 return 1;
4125 }
4126
4127 return 0;
4128}
4129
4136{
4137 int i;
4138 int64_t opts_min = INT64_MAX;
4139 OutputStream *ost_min = NULL;
4140
4141 for (i = 0; i < nb_output_streams; i++) {
4143 int64_t opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN :
4144 av_rescale_q(ost->last_mux_dts, ost->st->time_base,
4145 AV_TIME_BASE_Q);
4146 if (ost->last_mux_dts == AV_NOPTS_VALUE)
4147 av_log(NULL, AV_LOG_DEBUG,
4148 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
4149 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
4150
4151 if (!ost->initialized && !ost->inputs_done)
4152 return ost->unavailable ? NULL : ost;
4153
4154 if (!ost->finished && opts < opts_min) {
4155 opts_min = opts;
4156 ost_min = ost->unavailable ? NULL : ost;
4157 }
4158 }
4159 return ost_min;
4160}
4161
/* Enable (on != 0) or disable local echo on the controlling terminal.
 * Best-effort: if stdin is not a tty (tcgetattr fails), do nothing.
 * Compiles to a no-op on platforms without termios. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios term_attr;

    if (tcgetattr(0, &term_attr) != 0)
        return;

    if (on)
        term_attr.c_lflag |= ECHO;
    else
        term_attr.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &term_attr);
#endif
}
4173
4174static int check_keyboard_interaction(int64_t cur_time)
4175{
4176 int i, ret, key;
4178 return AVERROR_EXIT;
4179 /* read_key() returns 0 on EOF */
4180 if(cur_time - keyboard_last_time >= 100000 && !run_as_daemon){
4181 key = read_key();
4182 keyboard_last_time = cur_time;
4183 }else
4184 key = -1;
4185 if (key == 'q')
4186 return AVERROR_EXIT;
4187 if (key == '+') av_log_set_level(av_log_get_level()+10);
4188 if (key == '-') av_log_set_level(av_log_get_level()-10);
4189 if (key == 's') qp_hist ^= 1;
4190 if (key == 'h'){
4191 if (do_hex_dump){
4193 } else if(do_pkt_dump){
4194 do_hex_dump = 1;
4195 } else
4196 do_pkt_dump = 1;
4197 av_log_set_level(AV_LOG_DEBUG);
4198 }
4199 if (key == 'c' || key == 'C'){
4200 char buf[4096], target[64], command[256], arg[256] = {0};
4201 double time;
4202 int k, n = 0;
4203 fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
4204 i = 0;
4205 set_tty_echo(1);
4206 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4207 if (k > 0)
4208 buf[i++] = k;
4209 buf[i] = 0;
4210 set_tty_echo(0);
4211 fprintf(stderr, "\n");
4212 if (k > 0 &&
4213 (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
4214 av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
4215 target, time, command, arg);
4216 for (i = 0; i < nb_filtergraphs; i++) {
4217 FilterGraph *fg = filtergraphs[i];
4218 if (fg->graph) {
4219 if (time < 0) {
4220 ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
4221 key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
4222 fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
4223 } else if (key == 'c') {
4224 fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
4225 ret = AVERROR_PATCHWELCOME;
4226 } else {
4227 ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
4228 if (ret < 0)
4229 fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
4230 }
4231 }
4232 }
4233 } else {
4234 av_log(NULL, AV_LOG_ERROR,
4235 "Parse error, at least 3 arguments were expected, "
4236 "only %d given in string '%s'\n", n, buf);
4237 }
4238 }
4239 if (key == 'd' || key == 'D'){
4240 int debug=0;
4241 if(key == 'D') {
4242 debug = input_streams[0]->dec_ctx->debug << 1;
4243 if(!debug) debug = 1;
4244 while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
4245 debug += debug;
4246 }else{
4247 char buf[32];
4248 int k = 0;
4249 i = 0;
4250 set_tty_echo(1);
4251 while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
4252 if (k > 0)
4253 buf[i++] = k;
4254 buf[i] = 0;
4255 set_tty_echo(0);
4256 fprintf(stderr, "\n");
4257 if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
4258 fprintf(stderr,"error parsing debug value\n");
4259 }
4260 for(i=0;i<nb_input_streams;i++) {
4261 input_streams[i]->dec_ctx->debug = debug;
4262 }
4263 for(i=0;i<nb_output_streams;i++) {
4265 ost->enc_ctx->debug = debug;
4266 }
4267 if(debug) av_log_set_level(AV_LOG_DEBUG);
4268 fprintf(stderr,"debug=%d\n", debug);
4269 }
4270 if (key == '?'){
4271 fprintf(stderr, "key function\n"
4272 "? show this help\n"
4273 "+ increase verbosity\n"
4274 "- decrease verbosity\n"
4275 "c Send command to first matching filter supporting it\n"
4276 "C Send/Queue command to all matching filters\n"
4277 "D cycle through available debug modes\n"
4278 "h dump packets/hex press to cycle through the 3 states\n"
4279 "q quit\n"
4280 "s Show QP histogram\n"
4281 );
4282 }
4283 return 0;
4284}
4285
4286#if HAVE_THREADS
4287static void *input_thread(void *arg)
4288{
4289 InputFile *f = arg;
4290 AVPacket *pkt = f->pkt, *queue_pkt;
4291 unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4292 int ret = 0;
4293
4294 while (1) {
4295 ret = av_read_frame(f->ctx, pkt);
4296
4297 if (ret == AVERROR(EAGAIN)) {
4298 av_usleep(10000);
4299 continue;
4300 }
4301 if (ret < 0) {
4302 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4303 break;
4304 }
4305 queue_pkt = av_packet_alloc();
4306 if (!queue_pkt) {
4307 av_packet_unref(pkt);
4308 av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
4309 break;
4310 }
4311 av_packet_move_ref(queue_pkt, pkt);
4312 ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
4313 if (flags && ret == AVERROR(EAGAIN)) {
4314 flags = 0;
4315 ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
4316 av_log(f->ctx, AV_LOG_WARNING,
4317 "Thread message queue blocking; consider raising the "
4318 "thread_queue_size option (current value: %d)\n",
4319 f->thread_queue_size);
4320 }
4321 if (ret < 0) {
4322 if (ret != AVERROR_EOF)
4323 av_log(f->ctx, AV_LOG_ERROR,
4324 "Unable to send packet to main thread: %s\n",
4325 av_err2str(ret));
4326 av_packet_free(&queue_pkt);
4327 av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4328 break;
4329 }
4330 }
4331
4332 return NULL;
4333}
4334
4335static void free_input_thread(int i)
4336{
4337 InputFile *f = input_files[i];
4338 AVPacket *pkt;
4339
4340 if (!f || !f->in_thread_queue)
4341 return;
4342 av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4343 while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4344 av_packet_free(&pkt);
4345
4346 pthread_join(f->thread, NULL);
4347 f->joined = 1;
4348 av_thread_message_queue_free(&f->in_thread_queue);
4349}
4350
4351static void free_input_threads(void)
4352{
4353 int i;
4354
4355 for (i = 0; i < nb_input_files; i++)
4356 free_input_thread(i);
4357}
4358
4359static int init_input_thread(int i)
4360{
4361 int ret;
4362 InputFile *f = input_files[i];
4363
4364 if (f->thread_queue_size < 0)
4365 f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
4366 if (!f->thread_queue_size)
4367 return 0;
4368
4369 if (f->ctx->pb ? !f->ctx->pb->seekable :
4370 strcmp(f->ctx->iformat->name, "lavfi"))
4371 f->non_blocking = 1;
4372 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4373 f->thread_queue_size, sizeof(f->pkt));
4374 if (ret < 0)
4375 return ret;
4376
4377 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4378 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4379 av_thread_message_queue_free(&f->in_thread_queue);
4380 return AVERROR(ret);
4381 }
4382
4383 return 0;
4384}
4385
4386static int init_input_threads(void)
4387{
4388 int i, ret;
4389
4390 for (i = 0; i < nb_input_files; i++) {
4391 ret = init_input_thread(i);
4392 if (ret < 0)
4393 return ret;
4394 }
4395 return 0;
4396}
4397
4398static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
4399{
4400 return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4401 f->non_blocking ?
4402 AV_THREAD_MESSAGE_NONBLOCK : 0);
4403}
4404#endif
4405
4406static int get_input_packet(InputFile *f, AVPacket **pkt)
4407{
4408 if (f->readrate || f->rate_emu) {
4409 int i;
4410 int64_t file_start = copy_ts * (
4411 (f->ctx->start_time != AV_NOPTS_VALUE ? f->ctx->start_time * !start_at_zero : 0) +
4412 (f->start_time != AV_NOPTS_VALUE ? f->start_time : 0)
4413 );
4414 float scale = f->rate_emu ? 1.0 : f->readrate;
4415 for (i = 0; i < f->nb_streams; i++) {
4417 int64_t stream_ts_offset, pts, now;
4418 if (!ist->nb_packets || (ist->decoding_needed && !ist->got_output)) continue;
4419 stream_ts_offset = FFMAX(ist->first_dts != AV_NOPTS_VALUE ? ist->first_dts : 0, file_start);
4420 pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4421 now = (av_gettime_relative() - ist->start) * scale + stream_ts_offset;
4422 if (pts > now)
4423 return AVERROR(EAGAIN);
4424 }
4425 }
4426
4427#if HAVE_THREADS
4428 if (f->thread_queue_size)
4429 return get_input_packet_mt(f, pkt);
4430#endif
4431 *pkt = f->pkt;
4432 return av_read_frame(f->ctx, *pkt);
4433}
4434
4435static int got_eagain(void)
4436{
4437 int i;
4438 for (i = 0; i < nb_output_streams; i++)
4439 if (output_streams[i]->unavailable)
4440 return 1;
4441 return 0;
4442}
4443
4444static void reset_eagain(void)
4445{
4446 int i;
4447 for (i = 0; i < nb_input_files; i++)
4448 input_files[i]->eagain = 0;
4449 for (i = 0; i < nb_output_streams; i++)
4450 output_streams[i]->unavailable = 0;
4451}
4452
4453// set duration to max(tmp, duration) in a proper time base and return duration's time_base
4454static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4455 AVRational time_base)
4456{
4457 int ret;
4458
4459 if (!*duration) {
4460 *duration = tmp;
4461 return tmp_time_base;
4462 }
4463
4464 ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4465 if (ret < 0) {
4466 *duration = tmp;
4467 return tmp_time_base;
4468 }
4469
4470 return time_base;
4471}
4472
4473static int seek_to_start(InputFile *ifile, AVFormatContext *is)
4474{
4476 AVCodecContext *avctx;
4477 int i, ret, has_audio = 0;
4478 int64_t duration = 0;
4479
4480 ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
4481 if (ret < 0)
4482 return ret;
4483
4484 for (i = 0; i < ifile->nb_streams; i++) {
4485 ist = input_streams[ifile->ist_index + i];
4486 avctx = ist->dec_ctx;
4487
4488 /* duration is the length of the last frame in a stream
4489 * when audio stream is present we don't care about
4490 * last video frame length because it's not defined exactly */
4491 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4492 has_audio = 1;
4493 }
4494
4495 for (i = 0; i < ifile->nb_streams; i++) {
4496 ist = input_streams[ifile->ist_index + i];
4497 avctx = ist->dec_ctx;
4498
4499 if (has_audio) {
4500 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4501 AVRational sample_rate = {1, avctx->sample_rate};
4502
4503 duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4504 } else {
4505 continue;
4506 }
4507 } else {
4508 if (ist->framerate.num) {
4509 duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4510 } else if (ist->st->avg_frame_rate.num) {
4511 duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4512 } else {
4513 duration = 1;
4514 }
4515 }
4516 if (!ifile->duration)
4517 ifile->time_base = ist->st->time_base;
4518 /* the total duration of the stream, max_pts - min_pts is
4519 * the duration of the stream without the last frame */
4520 if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4521 duration += ist->max_pts - ist->min_pts;
4522 ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4523 ifile->time_base);
4524 }
4525
4526 if (ifile->loop > 0)
4527 ifile->loop--;
4528
4529 return ret;
4530}
4531
4532/*
4533 * Return
4534 * - 0 -- one packet was read and processed
4535 * - AVERROR(EAGAIN) -- no packets were available for selected file,
4536 * this function should be called again
4537 * - AVERROR_EOF -- this function should not be called again
4538 */
4539static int process_input(int file_index)
4540{
4541 InputFile *ifile = input_files[file_index];
4542 AVFormatContext *is;
4544 AVPacket *pkt;
4545 int ret, thread_ret, i, j;
4546 int64_t duration;
4547 int64_t pkt_dts;
4548 int disable_discontinuity_correction = copy_ts;
4549
4550 is = ifile->ctx;
4551 ret = get_input_packet(ifile, &pkt);
4552
4553 if (ret == AVERROR(EAGAIN)) {
4554 ifile->eagain = 1;
4555 return ret;
4556 }
4557 if (ret < 0 && ifile->loop) {
4558 AVCodecContext *avctx;
4559 for (i = 0; i < ifile->nb_streams; i++) {
4560 ist = input_streams[ifile->ist_index + i];
4561 avctx = ist->dec_ctx;
4562 if (ist->decoding_needed) {
4563 ret = process_input_packet(ist, NULL, 1);
4564 if (ret>0)
4565 return 0;
4566 avcodec_flush_buffers(avctx);
4567 }
4568 }
4569#if HAVE_THREADS
4570 free_input_thread(file_index);
4571#endif
4572 ret = seek_to_start(ifile, is);
4573#if HAVE_THREADS
4574 thread_ret = init_input_thread(file_index);
4575 if (thread_ret < 0)
4576 return thread_ret;
4577#endif
4578 if (ret < 0)
4579 av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4580 else
4581 ret = get_input_packet(ifile, &pkt);
4582 if (ret == AVERROR(EAGAIN)) {
4583 ifile->eagain = 1;
4584 return ret;
4585 }
4586 }
4587 if (ret < 0) {
4588 if (ret != AVERROR_EOF) {
4589 print_error(is->url, ret);
4590 if (exit_on_error)
4591 exit_program(1);
4592 }
4593
4594 for (i = 0; i < ifile->nb_streams; i++) {
4595 ist = input_streams[ifile->ist_index + i];
4596 if (ist->decoding_needed) {
4597 ret = process_input_packet(ist, NULL, 0);
4598 if (ret>0)
4599 return 0;
4600 }
4601
4602 /* mark all outputs that don't go through lavfi as finished */
4603 for (j = 0; j < nb_output_streams; j++) {
4605
4606 if (ost->source_index == ifile->ist_index + i &&
4607 (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4609 }
4610 }
4611
4612 ifile->eof_reached = 1;
4613 return AVERROR(EAGAIN);
4614 }
4615
4616 reset_eagain();
4617
4618 if (do_pkt_dump) {
4619 av_pkt_dump_log2(NULL, AV_LOG_INFO, pkt, do_hex_dump,
4620 is->streams[pkt->stream_index]);
4621 }
4622 /* the following test is needed in case new streams appear
4623 dynamically in stream : we ignore them */
4624 if (pkt->stream_index >= ifile->nb_streams) {
4625 report_new_stream(file_index, pkt);
4626 goto discard_packet;
4627 }
4628
4629 ist = input_streams[ifile->ist_index + pkt->stream_index];
4630
4631 ist->data_size += pkt->size;
4632 ist->nb_packets++;
4633
4634 if (ist->discard)
4635 goto discard_packet;
4636
4637 if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
4638 av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
4639 "%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index);
4640 if (exit_on_error)
4641 exit_program(1);
4642 }
4643
4644 if (debug_ts) {
4645 av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4646 "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4647 ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4648 av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
4649 av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
4650 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
4651 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
4652 av_ts2str(input_files[ist->file_index]->ts_offset),
4653 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4654 }
4655
4656 if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4657 int64_t stime, stime2;
4658 // Correcting starttime based on the enabled streams
4659 // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4660 // so we instead do it here as part of discontinuity handling
4661 if ( ist->next_dts == AV_NOPTS_VALUE
4662 && ifile->ts_offset == -is->start_time
4663 && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4664 int64_t new_start_time = INT64_MAX;
4665 for (i=0; i<is->nb_streams; i++) {
4666 AVStream *st = is->streams[i];
4667 if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4668 continue;
4669 new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4670 }
4671 if (new_start_time > is->start_time) {
4672 av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4673 ifile->ts_offset = -new_start_time;
4674 }
4675 }
4676
4677 stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4678 stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4679 ist->wrap_correction_done = 1;
4680
4681 if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4682 pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
4683 ist->wrap_correction_done = 0;
4684 }
4685 if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4686 pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
4687 ist->wrap_correction_done = 0;
4688 }
4689 }
4690
4691 /* add the stream-global side data to the first packet */
4692 if (ist->nb_packets == 1) {
4693 for (i = 0; i < ist->st->nb_side_data; i++) {
4694 AVPacketSideData *src_sd = &ist->st->side_data[i];
4695 uint8_t *dst_data;
4696
4697 if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4698 continue;
4699
4700 if (av_packet_get_side_data(pkt, src_sd->type, NULL))
4701 continue;
4702
4703 dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
4704 if (!dst_data)
4705 exit_program(1);
4706
4707 memcpy(dst_data, src_sd->data, src_sd->size);
4708 }
4709 }
4710
4711 if (pkt->dts != AV_NOPTS_VALUE)
4712 pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4713 if (pkt->pts != AV_NOPTS_VALUE)
4714 pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4715
4716 if (pkt->pts != AV_NOPTS_VALUE)
4717 pkt->pts *= ist->ts_scale;
4718 if (pkt->dts != AV_NOPTS_VALUE)
4719 pkt->dts *= ist->ts_scale;
4720
4721 pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4722 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4723 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4724 pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4725 && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4726 int64_t delta = pkt_dts - ifile->last_ts;
4727 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4728 delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4729 ifile->ts_offset -= delta;
4730 av_log(NULL, AV_LOG_DEBUG,
4731 "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4732 delta, ifile->ts_offset);
4733 pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4734 if (pkt->pts != AV_NOPTS_VALUE)
4735 pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4736 }
4737 }
4738
4739 duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4740 if (pkt->pts != AV_NOPTS_VALUE) {
4741 pkt->pts += duration;
4742 ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
4743 ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
4744 }
4745
4746 if (pkt->dts != AV_NOPTS_VALUE)
4747 pkt->dts += duration;
4748
4749 pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4750
4751 if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4752 (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4753 int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
4754 ist->st->time_base, AV_TIME_BASE_Q,
4755 AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
4756 if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4757 disable_discontinuity_correction = 0;
4758 }
4759
4760 if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4761 ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
4762 pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4763 !disable_discontinuity_correction) {
4764 int64_t delta = pkt_dts - ist->next_dts;
4765 if (is->iformat->flags & AVFMT_TS_DISCONT) {
4766 if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4767 delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4768 pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4769 ifile->ts_offset -= delta;
4770 av_log(NULL, AV_LOG_DEBUG,
4771 "timestamp discontinuity for stream #%d:%d "
4772 "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4773 ist->file_index, ist->st->index, ist->st->id,
4774 av_get_media_type_string(ist->dec_ctx->codec_type),
4775 delta, ifile->ts_offset);
4776 pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4777 if (pkt->pts != AV_NOPTS_VALUE)
4778 pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4779 }
4780 } else {
4781 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4782 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4783 av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
4784 pkt->dts = AV_NOPTS_VALUE;
4785 }
4786 if (pkt->pts != AV_NOPTS_VALUE){
4787 int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
4788 delta = pkt_pts - ist->next_dts;
4789 if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4790 delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4791 av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
4792 pkt->pts = AV_NOPTS_VALUE;
4793 }
4794 }
4795 }
4796 }
4797
4798 if (pkt->dts != AV_NOPTS_VALUE)
4799 ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
4800
4801 if (debug_ts) {
4802 av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4803 ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
4804 av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
4805 av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
4806 av_ts2str(input_files[ist->file_index]->ts_offset),
4807 av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4808 }
4809
4810 sub2video_heartbeat(ist, pkt->pts);
4811
4812 process_input_packet(ist, pkt, 0);
4813
4814discard_packet:
4815#if HAVE_THREADS
4816 if (ifile->thread_queue_size)
4817 av_packet_free(&pkt);
4818 else
4819#endif
4820 av_packet_unref(pkt);
4821
4822 return 0;
4823}
4824
4833{
4834 int i, ret;
4835 int nb_requests, nb_requests_max = 0;
4836 InputFilter *ifilter;
4838
4839 *best_ist = NULL;
4840 ret = avfilter_graph_request_oldest(graph->graph);
4841 if (ret >= 0)
4842 return reap_filters(0);
4843
4844 if (ret == AVERROR_EOF) {
4845 ret = reap_filters(1);
4846 for (i = 0; i < graph->nb_outputs; i++)
4847 close_output_stream(graph->outputs[i]->ost);
4848 return ret;
4849 }
4850 if (ret != AVERROR(EAGAIN))
4851 return ret;
4852
4853 for (i = 0; i < graph->nb_inputs; i++) {
4854 ifilter = graph->inputs[i];
4855 ist = ifilter->ist;
4856 if (input_files[ist->file_index]->eagain ||
4857 input_files[ist->file_index]->eof_reached)
4858 continue;
4859 nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4860 if (nb_requests > nb_requests_max) {
4861 nb_requests_max = nb_requests;
4862 *best_ist = ist;
4863 }
4864 }
4865
4866 if (!*best_ist)
4867 for (i = 0; i < graph->nb_outputs; i++)
4868 graph->outputs[i]->ost->unavailable = 1;
4869
4870 return 0;
4871}
4872
4878static int transcode_step(void)
4879{
4881 InputStream *ist = NULL;
4882 int ret;
4883
4884 ost = choose_output();
4885 if (!ost) {
4886 if (got_eagain()) {
4887 reset_eagain();
4888 av_usleep(10000);
4889 return 0;
4890 }
4891 av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4892 return AVERROR_EOF;
4893 }
4894
4895 if (ost->filter && !ost->filter->graph->graph) {
4898 if (ret < 0) {
4899 av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4900 return ret;
4901 }
4902 }
4903 }
4904
4905 if (ost->filter && ost->filter->graph->graph) {
4906 /*
4907 * Similar case to the early audio initialization in reap_filters.
4908 * Audio is special in ffmpeg.c currently as we depend on lavfi's
4909 * audio frame buffering/creation to get the output audio frame size
4910 * in samples correct. The audio frame size for the filter chain is
4911 * configured during the output stream initialization.
4912 *
4913 * Apparently avfilter_graph_request_oldest (called in
4914 * transcode_from_filter just down the line) peeks. Peeking already
4915 * puts one frame "ready to be given out", which means that any
4916 * update in filter buffer sink configuration afterwards will not
4917 * help us. And yes, even if it would be utilized,
4918 * av_buffersink_get_samples is affected, as it internally utilizes
4919 * the same early exit for peeked frames.
4920 *
4921 * In other words, if avfilter_graph_request_oldest would not make
4922 * further filter chain configuration or usage of
4923 * av_buffersink_get_samples useless (by just causing the return
4924 * of the peeked AVFrame as-is), we could get rid of this additional
4925 * early encoder initialization.
4926 */
4927 if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
4929
4930 if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4931 return ret;
4932 if (!ist)
4933 return 0;
4934 } else if (ost->filter) {
4935 int i;
4936 for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4937 InputFilter *ifilter = ost->filter->graph->inputs[i];
4938 if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4939 ist = ifilter->ist;
4940 break;
4941 }
4942 }
4943 if (!ist) {
4944 ost->inputs_done = 1;
4945 return 0;
4946 }
4947 } else {
4948 av_assert0(ost->source_index >= 0);
4950 }
4951
4952 ret = process_input(ist->file_index);
4953 if (ret == AVERROR(EAGAIN)) {
4954 if (input_files[ist->file_index]->eagain)
4955 ost->unavailable = 1;
4956 return 0;
4957 }
4958
4959 if (ret < 0)
4960 return ret == AVERROR_EOF ? 0 : ret;
4961
4962 return reap_filters(0);
4963}
4964
4965/*
4966 * The following code is the main loop of the file converter
4967 */
4968static int transcode(void)
4969{
4970 int ret, i;
4971 AVFormatContext *os;
4974 int64_t timer_start;
4975 int64_t total_packets_written = 0;
4976
4977 ret = transcode_init();
4978 if (ret < 0)
4979 goto fail;
4980
4981 if (stdin_interaction) {
4982 av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4983 }
4984
4985 timer_start = av_gettime_relative();
4986
4987#if HAVE_THREADS
4988 if ((ret = init_input_threads()) < 0)
4989 goto fail;
4990#endif
4991
4993 int64_t cur_time= av_gettime_relative();
4994
4995 /* if 'q' pressed, exits */
4997 if (check_keyboard_interaction(cur_time) < 0)
4998 break;
4999
5000 /* check if there's any stream where output is still needed */
5001 if (!need_output()) {
5002 av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
5003 break;
5004 }
5005
5006 ret = transcode_step();
5007 if (ret < 0 && ret != AVERROR_EOF) {
5008 av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
5009 break;
5010 }
5011
5012 /* dump report by using the output first video and audio streams */
5013 print_report(0, timer_start, cur_time);
5014 }
5015#if HAVE_THREADS
5016 free_input_threads();
5017#endif
5018
5019 /* at the end of stream, we must flush the decoder buffers */
5020 for (i = 0; i < nb_input_streams; i++) {
5021 ist = input_streams[i];
5022 if (!input_files[ist->file_index]->eof_reached) {
5023 process_input_packet(ist, NULL, 0);
5024 }
5025 }
5027
5028 term_exit();
5029
5030 /* write the trailer if needed and close file */
5031 for (i = 0; i < nb_output_files; i++) {
5032 os = output_files[i]->ctx;
5033 if (!output_files[i]->header_written) {
5034 av_log(NULL, AV_LOG_ERROR,
5035 "Nothing was written into output file %d (%s), because "
5036 "at least one of its streams received no packets.\n",
5037 i, os->url);
5038 continue;
5039 }
5040 if ((ret = av_write_trailer(os)) < 0) {
5041 av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
5042 if (exit_on_error)
5043 exit_program(1);
5044 }
5045 }
5046
5047 /* dump report by using the first video and audio streams */
5048 print_report(1, timer_start, av_gettime_relative());
5049
5050 /* close each encoder */
5051 for (i = 0; i < nb_output_streams; i++) {
5052 ost = output_streams[i];
5053 if (ost->encoding_needed) {
5054 av_freep(&ost->enc_ctx->stats_in);
5055 }
5056 total_packets_written += ost->packets_written;
5058 av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
5059 exit_program(1);
5060 }
5061 }
5062
5063 if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
5064 av_log(NULL, AV_LOG_FATAL, "Empty output\n");
5065 exit_program(1);
5066 }
5067
5068 /* close each decoder */
5069 for (i = 0; i < nb_input_streams; i++) {
5070 ist = input_streams[i];
5071 if (ist->decoding_needed) {
5072 avcodec_close(ist->dec_ctx);
5073 if (ist->hwaccel_uninit)
5074 ist->hwaccel_uninit(ist->dec_ctx);
5075 }
5076 }
5077
5079
5080 /* finished ! */
5081 ret = 0;
5082
5083 fail:
5084#if HAVE_THREADS
5085 free_input_threads();
5086#endif
5087
5088 if (output_streams) {
5089 for (i = 0; i < nb_output_streams; i++) {
5090 ost = output_streams[i];
5091 if (ost) {
5092 if (ost->logfile) {
5093 if (fclose(ost->logfile))
5094 av_log(NULL, AV_LOG_ERROR,
5095 "Error closing logfile, loss of information possible: %s\n",
5096 av_err2str(AVERROR(errno)));
5097 ost->logfile = NULL;
5098 }
5099 av_freep(&ost->forced_kf_pts);
5100 av_freep(&ost->apad);
5101 av_freep(&ost->disposition);
5102 av_dict_free(&ost->encoder_opts);
5103 av_dict_free(&ost->sws_dict);
5104 av_dict_free(&ost->swr_opts);
5105 av_dict_free(&ost->resample_opts);
5106 }
5107 }
5108 }
5109 return ret;
5110}
5111
5113{
5114 BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
5115#if HAVE_GETRUSAGE
5116 struct rusage rusage;
5117
5118 getrusage(RUSAGE_SELF, &rusage);
5119 time_stamps.user_usec =
5120 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
5121 time_stamps.sys_usec =
5122 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
5123#elif HAVE_GETPROCESSTIMES
5124 HANDLE proc;
5125 FILETIME c, e, k, u;
5126 proc = GetCurrentProcess();
5127 GetProcessTimes(proc, &c, &e, &k, &u);
5128 time_stamps.user_usec =
5129 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
5130 time_stamps.sys_usec =
5131 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
5132#else
5133 time_stamps.user_usec = time_stamps.sys_usec = 0;
5134#endif
5135 return time_stamps;
5136}
5137
/* Return the process's peak memory usage in bytes: peak resident set size
 * via getrusage() on POSIX, peak pagefile usage on Windows, or 0 when
 * neither facility is available. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss is reported in kilobytes. */
    return (int64_t)usage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE self = GetCurrentProcess();
    PROCESS_MEMORY_COUNTERS counters;

    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(self, &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    /* No portable way to query peak memory use on this platform. */
    return 0;
#endif
}
5155
/* No-op av_log callback that discards every message.  Installed when the
 * process is started with "-d" (daemon mode) to silence terminal logging. */
5156static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
5157{
5158}
5159
/* Reset the per-session globals so a subsequent ffmpeg_execute() run starts
 * from a clean slate (ffmpeg-kit reuses the process across sessions instead
 * of exec'ing a fresh binary).  NOTE(review): a few lines of this function
 * (including its header) are missing from this rendered listing. */
/* exit/signal bookkeeping */
5162 longjmp_value = 0;
5163 received_sigterm = 0;
5165 ffmpeg_exited = 0;
5166 copy_ts_first_pts = AV_NOPTS_VALUE;
5167
/* daemon flag and frame duplication/drop statistics */
5168 run_as_daemon = 0;
5169 nb_frames_dup = 0;
5170 dup_warning = 1000;
5171 nb_frames_drop = 0;
5172 nb_output_dumped = 0;
5173
5174 want_sdp = 1;
5175
5176 progress_avio = NULL;
5177
/* input/output stream and file tables (freed by the previous session) */
5178 input_streams = NULL;
5179 nb_input_streams = 0;
5180 input_files = NULL;
5181 nb_input_files = 0;
5182
5183 output_streams = NULL;
5185 output_files = NULL;
5186 nb_output_files = 0;
5187
5188 filtergraphs = NULL;
5189 nb_filtergraphs = 0;
5190
/* progress-report state */
5191 last_time = -1;
5193 first_report = 1;
5195
/*
 * Register the callback that forward_report() uses to push per-session
 * encoding statistics out to the host application; pass NULL to disable
 * forwarding.  NOTE(review): the meaning of the individual tuple fields
 * (frame, fps, quality, size, time, bitrate, speed) is inferred from
 * forward_report() — confirm against fftools_ffmpeg.h.
 */
5196void set_report_callback(void (*callback)(int, float, float, int64_t, int, double, double))
5197{
5198 report_callback = callback;
5199}
5200
/*
 * Cancel a running ffmpeg operation.
 *
 * An id of 0 interrupts the current in-process run through the same SIGINT
 * path used for terminal interrupts (sigterm_handler); any other id cancels
 * the session registered under that id via cancelSession().
 */
void cancel_operation(long id)
{
    if (id != 0) {
        cancelSession(id);
        return;
    }
    sigterm_handler(SIGINT);
}
5209
5210__thread OptionDef *ffmpeg_options = NULL;
5211
5212int ffmpeg_execute(int argc, char **argv)
5213{
5214 char _program_name[] = "ffmpeg";
5215 program_name = (char*)&_program_name;
5216 program_birth_year = 2000;
5217
5218 #define OFFSET(x) offsetof(OptionsContext, x)
5219 OptionDef options[] = {
5220
5221 /* main options */
5222 { "L", OPT_EXIT, { .func_arg = show_license }, "show license" },
5223 { "h", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5224 { "?", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5225 { "help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5226 { "-help", OPT_EXIT, { .func_arg = show_help }, "show help", "topic" },
5227 { "version", OPT_EXIT, { .func_arg = show_version }, "show version" },
5228 { "buildconf", OPT_EXIT, { .func_arg = show_buildconf }, "show build configuration" },
5229 { "formats", OPT_EXIT, { .func_arg = show_formats }, "show available formats" },
5230 { "muxers", OPT_EXIT, { .func_arg = show_muxers }, "show available muxers" },
5231 { "demuxers", OPT_EXIT, { .func_arg = show_demuxers }, "show available demuxers" },
5232 { "devices", OPT_EXIT, { .func_arg = show_devices }, "show available devices" },
5233 { "codecs", OPT_EXIT, { .func_arg = show_codecs }, "show available codecs" },
5234 { "decoders", OPT_EXIT, { .func_arg = show_decoders }, "show available decoders" },
5235 { "encoders", OPT_EXIT, { .func_arg = show_encoders }, "show available encoders" },
5236 { "bsfs", OPT_EXIT, { .func_arg = show_bsfs }, "show available bit stream filters" },
5237 { "protocols", OPT_EXIT, { .func_arg = show_protocols }, "show available protocols" },
5238 { "filters", OPT_EXIT, { .func_arg = show_filters }, "show available filters" },
5239 { "pix_fmts", OPT_EXIT, { .func_arg = show_pix_fmts }, "show available pixel formats" },
5240 { "layouts", OPT_EXIT, { .func_arg = show_layouts }, "show standard channel layouts" },
5241 { "sample_fmts", OPT_EXIT, { .func_arg = show_sample_fmts }, "show available audio sample formats" },
5242 { "colors", OPT_EXIT, { .func_arg = show_colors }, "show available color names" },
5243 { "loglevel", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" },
5244 { "v", HAS_ARG, { .func_arg = opt_loglevel }, "set logging level", "loglevel" },
5245 { "report", 0, { .func_arg = opt_report }, "generate a report" },
5246 { "max_alloc", HAS_ARG, { .func_arg = opt_max_alloc }, "set maximum size of a single allocated block", "bytes" },
5247 { "cpuflags", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpuflags }, "force specific cpu flags", "flags" },
5248 { "cpucount", HAS_ARG | OPT_EXPERT, { .func_arg = opt_cpucount }, "force specific cpu count", "count" },
5249 { "hide_banner", OPT_BOOL | OPT_EXPERT, {&hide_banner}, "do not show program banner", "hide_banner" },
5250
5251 #if CONFIG_AVDEVICE
5252 { "sources" , OPT_EXIT | HAS_ARG, { .func_arg = show_sources },
5253 "list sources of the input device", "device" },
5254 { "sinks" , OPT_EXIT | HAS_ARG, { .func_arg = show_sinks },
5255 "list sinks of the output device", "device" },
5256 #endif
5257
5258 { "f", HAS_ARG | OPT_STRING | OPT_OFFSET |
5259 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(format) },
5260 "force format", "fmt" },
5261 { "y", OPT_BOOL, { &file_overwrite },
5262 "overwrite output files" },
5263 { "n", OPT_BOOL, { &no_file_overwrite },
5264 "never overwrite output files" },
5265 { "ignore_unknown", OPT_BOOL, { &ignore_unknown_streams },
5266 "Ignore unknown stream types" },
5267 { "copy_unknown", OPT_BOOL | OPT_EXPERT, { &copy_unknown_streams },
5268 "Copy unknown stream types" },
5269 { "c", HAS_ARG | OPT_STRING | OPT_SPEC |
5270 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
5271 "codec name", "codec" },
5272 { "codec", HAS_ARG | OPT_STRING | OPT_SPEC |
5273 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(codec_names) },
5274 "codec name", "codec" },
5275 { "pre", HAS_ARG | OPT_STRING | OPT_SPEC |
5276 OPT_OUTPUT, { .off = OFFSET(presets) },
5277 "preset name", "preset" },
5278 { "map", HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5279 OPT_OUTPUT, { .func_arg = opt_map },
5280 "set input stream mapping",
5281 "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
5282 { "map_channel", HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_map_channel },
5283 "map an audio channel from one stream to another", "file.stream.channel[:syncfile.syncstream]" },
5284 { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC |
5285 OPT_OUTPUT, { .off = OFFSET(metadata_map) },
5286 "set metadata information of outfile from infile",
5287 "outfile[,metadata]:infile[,metadata]" },
5288 { "map_chapters", HAS_ARG | OPT_INT | OPT_EXPERT | OPT_OFFSET |
5289 OPT_OUTPUT, { .off = OFFSET(chapters_input_file) },
5290 "set chapters mapping", "input_file_index" },
5291 { "t", HAS_ARG | OPT_TIME | OPT_OFFSET |
5292 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(recording_time) },
5293 "record or transcode \"duration\" seconds of audio/video",
5294 "duration" },
5295 { "to", HAS_ARG | OPT_TIME | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(stop_time) },
5296 "record or transcode stop time", "time_stop" },
5297 { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(limit_filesize) },
5298 "set the limit file size in bytes", "limit_size" },
5299 { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET |
5300 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(start_time) },
5301 "set the start time offset", "time_off" },
5302 { "sseof", HAS_ARG | OPT_TIME | OPT_OFFSET |
5303 OPT_INPUT, { .off = OFFSET(start_time_eof) },
5304 "set the start time offset relative to EOF", "time_off" },
5305 { "seek_timestamp", HAS_ARG | OPT_INT | OPT_OFFSET |
5306 OPT_INPUT, { .off = OFFSET(seek_timestamp) },
5307 "enable/disable seeking by timestamp with -ss" },
5308 { "accurate_seek", OPT_BOOL | OPT_OFFSET | OPT_EXPERT |
5309 OPT_INPUT, { .off = OFFSET(accurate_seek) },
5310 "enable/disable accurate seeking with -ss" },
5311 { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET |
5312 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(input_ts_offset) },
5313 "set the input ts offset", "time_off" },
5314 { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC |
5315 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(ts_scale) },
5316 "set the input ts scale", "scale" },
5317 { "timestamp", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_recording_timestamp },
5318 "set the recording timestamp ('now' to set the current time)", "time" },
5319 { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(metadata) },
5320 "add metadata", "string=string" },
5321 { "program", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(program) },
5322 "add program with specified streams", "title=string:st=number..." },
5323 { "dframes", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
5324 OPT_OUTPUT, { .func_arg = opt_data_frames },
5325 "set the number of data frames to output", "number" },
5326 { "benchmark", OPT_BOOL | OPT_EXPERT, { &do_benchmark },
5327 "add timings for benchmarking" },
5328 { "benchmark_all", OPT_BOOL | OPT_EXPERT, { &do_benchmark_all },
5329 "add timings for each task" },
5330 { "progress", HAS_ARG | OPT_EXPERT, { .func_arg = opt_progress },
5331 "write program-readable progress information", "url" },
5332 { "stdin", OPT_BOOL | OPT_EXPERT, { &stdin_interaction },
5333 "enable or disable interaction on standard input" },
5334 { "timelimit", HAS_ARG | OPT_EXPERT, { .func_arg = opt_timelimit },
5335 "set max runtime in seconds in CPU user time", "limit" },
5336 { "dump", OPT_BOOL | OPT_EXPERT, { &do_pkt_dump },
5337 "dump each input packet" },
5338 { "hex", OPT_BOOL | OPT_EXPERT, { &do_hex_dump },
5339 "when dumping packets, also dump the payload" },
5340 { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
5341 OPT_INPUT, { .off = OFFSET(rate_emu) },
5342 "read input at native frame rate", "" },
5343 { "target", HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_target },
5344 "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\" or \"dv50\" "
5345 "with optional prefixes \"pal-\", \"ntsc-\" or \"film-\")", "type" },
5346 { "vsync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vsync },
5347 "video sync method", "" },
5348 { "frame_drop_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &frame_drop_threshold },
5349 "frame drop threshold", "" },
5350 { "async", HAS_ARG | OPT_INT | OPT_EXPERT, { &audio_sync_method },
5351 "audio sync method", "" },
5352 { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &audio_drift_threshold },
5353 "audio drift threshold", "threshold" },
5354 { "copyts", OPT_BOOL | OPT_EXPERT, { &copy_ts },
5355 "copy timestamps" },
5356 { "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero },
5357 "shift input timestamps to start at 0 when using copyts" },
5358 { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb },
5359 "copy input stream time base when stream copying", "mode" },
5360 { "start_at_zero", OPT_BOOL | OPT_EXPERT, { &start_at_zero },
5361 "shift input timestamps to start at 0 when using copyts" },
5362 { "copytb", HAS_ARG | OPT_INT | OPT_EXPERT, { &copy_tb },
5363 "copy input stream time base when stream copying", "mode" },
5364 { "shortest", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
5365 OPT_OUTPUT, { .off = OFFSET(shortest) },
5366 "finish encoding within shortest input" },
5367 { "bitexact", OPT_BOOL | OPT_EXPERT | OPT_OFFSET |
5368 OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(bitexact) },
5369 "bitexact mode" },
5370 { "apad", OPT_STRING | HAS_ARG | OPT_SPEC |
5371 OPT_OUTPUT, { .off = OFFSET(apad) },
5372 "audio pad", "" },
5373 { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &dts_delta_threshold },
5374 "timestamp discontinuity delta threshold", "threshold" },
5375 { "dts_error_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, { &dts_error_threshold },
5376 "timestamp error delta threshold", "threshold" },
5377 { "xerror", OPT_BOOL | OPT_EXPERT, { &exit_on_error },
5378 "exit on error", "error" },
5379 { "abort_on", HAS_ARG | OPT_EXPERT, { .func_arg = opt_abort_on },
5380 "abort on the specified condition flags", "flags" },
5381 { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC |
5382 OPT_OUTPUT, { .off = OFFSET(copy_initial_nonkeyframes) },
5383 "copy initial non-keyframes" },
5384 { "copypriorss", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(copy_prior_start) },
5385 "copy or discard frames before start time" },
5386 { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(max_frames) },
5387 "set the number of frames to output", "number" },
5388 { "tag", OPT_STRING | HAS_ARG | OPT_SPEC |
5389 OPT_EXPERT | OPT_OUTPUT | OPT_INPUT, { .off = OFFSET(codec_tags) },
5390 "force codec tag/fourcc", "fourcc/tag" },
5391 { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE |
5392 OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(qscale) },
5393 "use fixed quality scale (VBR)", "q" },
5394 { "qscale", HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5395 OPT_OUTPUT, { .func_arg = opt_qscale },
5396 "use fixed quality scale (VBR)", "q" },
5397 { "profile", HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_profile },
5398 "set profile", "profile" },
5399 { "filter", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filters) },
5400 "set stream filtergraph", "filter_graph" },
5401 { "filter_threads", HAS_ARG | OPT_INT, { &filter_nbthreads },
5402 "number of non-complex filter threads" },
5403 { "filter_script", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(filter_scripts) },
5404 "read stream filtergraph description from a file", "filename" },
5405 { "reinit_filter", HAS_ARG | OPT_INT | OPT_SPEC | OPT_INPUT, { .off = OFFSET(reinit_filters) },
5406 "reinit filtergraph on input parameter changes", "" },
5407 { "filter_complex", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex },
5408 "create a complex filtergraph", "graph_description" },
5409 { "filter_complex_threads", HAS_ARG | OPT_INT, { &filter_complex_nbthreads },
5410 "number of threads for -filter_complex" },
5411 { "lavfi", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex },
5412 "create a complex filtergraph", "graph_description" },
5413 { "filter_complex_script", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_complex_script },
5414 "read complex filtergraph description from a file", "filename" },
5415 { "auto_conversion_filters", OPT_BOOL | OPT_EXPERT, { &auto_conversion_filters },
5416 "enable automatic conversion filters globally" },
5417 { "stats", OPT_BOOL, { &print_stats },
5418 "print progress report during encoding", },
5419 { "stats_period", HAS_ARG | OPT_EXPERT, { .func_arg = opt_stats_period },
5420 "set the period at which ffmpeg updates stats and -progress output", "time" },
5421 { "attach", HAS_ARG | OPT_PERFILE | OPT_EXPERT |
5422 OPT_OUTPUT, { .func_arg = opt_attach },
5423 "add an attachment to the output file", "filename" },
5424 { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC |
5426 "extract an attachment into a file", "filename" },
5427 { "stream_loop", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_INPUT |
5428 OPT_OFFSET, { .off = OFFSET(loop) }, "set number of times input stream shall be looped", "loop count" },
5429 { "debug_ts", OPT_BOOL | OPT_EXPERT, { &debug_ts },
5430 "print timestamp debugging info" },
5431 { "max_error_rate", HAS_ARG | OPT_FLOAT, { &max_error_rate },
5432 "ratio of errors (0.0: no errors, 1.0: 100% errors) above which ffmpeg returns an error instead of success.", "maximum error rate" },
5433 { "discard", OPT_STRING | HAS_ARG | OPT_SPEC |
5434 OPT_INPUT, { .off = OFFSET(discard) },
5435 "discard", "" },
5436 { "disposition", OPT_STRING | HAS_ARG | OPT_SPEC |
5437 OPT_OUTPUT, { .off = OFFSET(disposition) },
5438 "disposition", "" },
5439 { "thread_queue_size", HAS_ARG | OPT_INT | OPT_OFFSET | OPT_EXPERT | OPT_INPUT,
5440 { .off = OFFSET(thread_queue_size) },
5441 "set the maximum number of queued packets from the demuxer" },
5442 { "find_stream_info", OPT_BOOL | OPT_PERFILE | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
5443 "read and decode the streams to fill missing information with heuristics" },
5444
5445 /* video options */
5446 { "vframes", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_frames },
5447 "set the number of video frames to output", "number" },
5448 { "r", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
5449 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_rates) },
5450 "set frame rate (Hz value, fraction or abbreviation)", "rate" },
5452 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_sizes) },
5453 "set frame size (WxH or abbreviation)", "size" },
5454 { "aspect", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_SPEC |
5455 OPT_OUTPUT, { .off = OFFSET(frame_aspect_ratios) },
5456 "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
5457 { "pix_fmt", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5458 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(frame_pix_fmts) },
5459 "set pixel format", "format" },
5460 { "bits_per_raw_sample", OPT_VIDEO | OPT_INT | HAS_ARG, { &frame_bits_per_raw_sample },
5461 "set the number of bits per raw sample", "number" },
5462 { "intra", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &intra_only },
5463 "deprecated use -g 1" },
5464 { "vn", OPT_VIDEO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(video_disable) },
5465 "disable video" },
5466 { "rc_override", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5467 OPT_OUTPUT, { .off = OFFSET(rc_overrides) },
5468 "rate control override for specific intervals", "override" },
5469 { "vcodec", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_INPUT |
5470 OPT_OUTPUT, { .func_arg = opt_video_codec },
5471 "force video codec ('copy' to copy stream)", "codec" },
5472 { "sameq", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_sameq },
5473 "Removed" },
5474 { "same_quant", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_sameq },
5475 "Removed" },
5476 { "timecode", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_timecode },
5477 "set initial TimeCode value.", "hh:mm:ss[:;.]ff" },
5478 { "pass", OPT_VIDEO | HAS_ARG | OPT_SPEC | OPT_INT | OPT_OUTPUT, { .off = OFFSET(pass) },
5479 "select the pass number (1 to 3)", "n" },
5480 { "passlogfile", OPT_VIDEO | HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC |
5481 OPT_OUTPUT, { .off = OFFSET(passlogfiles) },
5482 "select two pass log file name prefix", "prefix" },
5483 { "deinterlace", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_deinterlace },
5484 "this option is deprecated, use the yadif filter instead" },
5485 { "psnr", OPT_VIDEO | OPT_BOOL | OPT_EXPERT, { &do_psnr },
5486 "calculate PSNR of compressed frames" },
5487 { "vstats", OPT_VIDEO | OPT_EXPERT , { .func_arg = opt_vstats },
5488 "dump video coding statistics to file" },
5489 { "vstats_file", OPT_VIDEO | HAS_ARG | OPT_EXPERT , { .func_arg = opt_vstats_file },
5490 "dump video coding statistics to file", "file" },
5491 { "vstats_version", OPT_VIDEO | OPT_INT | HAS_ARG | OPT_EXPERT , { &vstats_version },
5492 "Version of the vstats format to use."},
5493 { "vf", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_video_filters },
5494 "set video filters", "filter_graph" },
5495 { "intra_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5496 OPT_OUTPUT, { .off = OFFSET(intra_matrices) },
5497 "specify intra matrix coeffs", "matrix" },
5498 { "inter_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5499 OPT_OUTPUT, { .off = OFFSET(inter_matrices) },
5500 "specify inter matrix coeffs", "matrix" },
5501 { "chroma_intra_matrix", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_STRING | OPT_SPEC |
5502 OPT_OUTPUT, { .off = OFFSET(chroma_intra_matrices) },
5503 "specify intra matrix coeffs", "matrix" },
5504 { "top", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_INT| OPT_SPEC |
5505 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(top_field_first) },
5506 "top=1/bottom=0/auto=-1 field first", "" },
5507 { "vtag", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5508 OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_old2new },
5509 "force video tag/fourcc", "fourcc/tag" },
5510 { "qphist", OPT_VIDEO | OPT_BOOL | OPT_EXPERT , { &qp_hist },
5511 "show QP histogram" },
5512 { "force_fps", OPT_VIDEO | OPT_BOOL | OPT_EXPERT | OPT_SPEC |
5513 OPT_OUTPUT, { .off = OFFSET(force_fps) },
5514 "force the selected framerate, disable the best supported framerate selection" },
5515 { "streamid", OPT_VIDEO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5516 OPT_OUTPUT, { .func_arg = opt_streamid },
5517 "set the value of an outfile streamid", "streamIndex:value" },
5518 { "force_key_frames", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5519 OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(forced_key_frames) },
5520 "force key frames at specified timestamps", "timestamps" },
5521 { "ab", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
5522 "audio bitrate (please use -b:a)", "bitrate" },
5523 { "b", OPT_VIDEO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_bitrate },
5524 "video bitrate (please use -b:v)", "bitrate" },
5525 { "hwaccel", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5526 OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccels) },
5527 "use HW accelerated decoding", "hwaccel name" },
5528 { "hwaccel_device", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5529 OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_devices) },
5530 "select a device for HW acceleration", "devicename" },
5531 { "hwaccel_output_format", OPT_VIDEO | OPT_STRING | HAS_ARG | OPT_EXPERT |
5532 OPT_SPEC | OPT_INPUT, { .off = OFFSET(hwaccel_output_formats) },
5533 "select output format used with HW accelerated decoding", "format" },
5534 #if CONFIG_VIDEOTOOLBOX
5535 { "videotoolbox_pixfmt", HAS_ARG | OPT_STRING | OPT_EXPERT, { &videotoolbox_pixfmt}, "" },
5536 #endif
5537 { "hwaccels", OPT_EXIT, { .func_arg = show_hwaccels },
5538 "show available HW acceleration methods" },
5539 { "autorotate", HAS_ARG | OPT_BOOL | OPT_SPEC |
5540 OPT_EXPERT | OPT_INPUT, { .off = OFFSET(autorotate) },
5541 "automatically insert correct rotate filters" },
5542 { "autoscale", HAS_ARG | OPT_BOOL | OPT_SPEC |
5543 OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(autoscale) },
5544 "automatically insert a scale filter at the end of the filter graph" },
5545
5546 /* audio options */
5547 { "aframes", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_frames },
5548 "set the number of audio frames to output", "number" },
5549 { "aq", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_qscale },
5550 "set audio quality (codec-specific)", "quality", },
5551 { "ar", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
5552 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_sample_rate) },
5553 "set audio sampling rate (in Hz)", "rate" },
5554 { "ac", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC |
5555 OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(audio_channels) },
5556 "set number of audio channels", "channels" },
5557 { "an", OPT_AUDIO | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT,{ .off = OFFSET(audio_disable) },
5558 "disable audio" },
5559 { "acodec", OPT_AUDIO | HAS_ARG | OPT_PERFILE |
5560 OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_audio_codec },
5561 "force audio codec ('copy' to copy stream)", "codec" },
5562 { "atag", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5563 OPT_OUTPUT, { .func_arg = opt_old2new },
5564 "force audio tag/fourcc", "fourcc/tag" },
5565 { "vol", OPT_AUDIO | HAS_ARG | OPT_INT, { &audio_volume },
5566 "change audio volume (256=normal)" , "volume" },
5567 { "sample_fmt", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_SPEC |
5568 OPT_STRING | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(sample_fmts) },
5569 "set sample format", "format" },
5570 { "channel_layout", OPT_AUDIO | HAS_ARG | OPT_EXPERT | OPT_PERFILE |
5571 OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_channel_layout },
5572 "set channel layout", "layout" },
5573 { "af", OPT_AUDIO | HAS_ARG | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_audio_filters },
5574 "set audio filters", "filter_graph" },
5575 { "guess_layout_max", OPT_AUDIO | HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_INPUT, { .off = OFFSET(guess_layout_max) },
5576 "set the maximum number of channels to try to guess the channel layout" },
5577
5578 /* subtitle options */
5579 { "sn", OPT_SUBTITLE | OPT_BOOL | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(subtitle_disable) },
5580 "disable subtitle" },
5581 { "scodec", OPT_SUBTITLE | HAS_ARG | OPT_PERFILE | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_subtitle_codec },
5582 "force subtitle codec ('copy' to copy stream)", "codec" },
5583 { "stag", OPT_SUBTITLE | HAS_ARG | OPT_EXPERT | OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new }
5584 , "force subtitle tag/fourcc", "fourcc/tag" },
5585 { "fix_sub_duration", OPT_BOOL | OPT_EXPERT | OPT_SUBTITLE | OPT_SPEC | OPT_INPUT, { .off = OFFSET(fix_sub_duration) },
5586 "fix subtitles duration" },
5587 { "canvas_size", OPT_SUBTITLE | HAS_ARG | OPT_STRING | OPT_SPEC | OPT_INPUT, { .off = OFFSET(canvas_sizes) },
5588 "set canvas size (WxH or abbreviation)", "size" },
5589
5590 /* grab options */
5591 { "vc", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_video_channel },
5592 "deprecated, use -channel", "channel" },
5593 { "tvstd", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_video_standard },
5594 "deprecated, use -standard", "standard" },
5595 { "isync", OPT_BOOL | OPT_EXPERT, { &input_sync }, "this option is deprecated and does nothing", "" },
5596
5597 /* muxer options */
5598 { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_max_delay) },
5599 "set the maximum demux-decode delay", "seconds" },
5600 { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET | OPT_OUTPUT, { .off = OFFSET(mux_preload) },
5601 "set the initial demux-decode delay", "seconds" },
5602 { "sdp_file", HAS_ARG | OPT_EXPERT | OPT_OUTPUT, { .func_arg = opt_sdp_file },
5603 "specify a file in which to print sdp information", "file" },
5604
5605 { "time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(time_bases) },
5606 "set the desired time base hint for output stream (1:24, 1:48000 or 0.04166, 2.0833e-5)", "ratio" },
5607 { "enc_time_base", HAS_ARG | OPT_STRING | OPT_EXPERT | OPT_SPEC | OPT_OUTPUT, { .off = OFFSET(enc_time_bases) },
5608 "set the desired time base for the encoder (1:24, 1:48000 or 0.04166, 2.0833e-5). "
5609 "two special values are defined - "
5610 "0 = use frame rate (video) or sample rate (audio),"
5611 "-1 = match source time base", "ratio" },
5612
5613 { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(bitstream_filters) },
5614 "A comma-separated list of bitstream filters", "bitstream_filters" },
5615 { "absf", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new },
5616 "deprecated", "audio bitstream_filters" },
5617 { "vbsf", OPT_VIDEO | HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_old2new },
5618 "deprecated", "video bitstream_filters" },
5619
5620 { "apre", HAS_ARG | OPT_AUDIO | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5621 "set the audio options to the indicated preset", "preset" },
5622 { "vpre", OPT_VIDEO | HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5623 "set the video options to the indicated preset", "preset" },
5624 { "spre", HAS_ARG | OPT_SUBTITLE | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5625 "set the subtitle options to the indicated preset", "preset" },
5626 { "fpre", HAS_ARG | OPT_EXPERT| OPT_PERFILE | OPT_OUTPUT, { .func_arg = opt_preset },
5627 "set options from indicated preset file", "filename" },
5628
5629 { "max_muxing_queue_size", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(max_muxing_queue_size) },
5630 "maximum number of packets that can be buffered while waiting for all streams to initialize", "packets" },
5631 { "muxing_queue_data_threshold", HAS_ARG | OPT_INT | OPT_SPEC | OPT_EXPERT | OPT_OUTPUT, { .off = OFFSET(muxing_queue_data_threshold) },
5632 "set the threshold after which max_muxing_queue_size is taken into account", "bytes" },
5633
5634 /* data codec support */
5635 { "dcodec", HAS_ARG | OPT_DATA | OPT_PERFILE | OPT_EXPERT | OPT_INPUT | OPT_OUTPUT, { .func_arg = opt_data_codec },
5636 "force data codec ('copy' to copy stream)", "codec" },
5637 { "dn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET | OPT_INPUT | OPT_OUTPUT, { .off = OFFSET(data_disable) },
5638 "disable data" },
5639
5640 #if CONFIG_VAAPI
5641 { "vaapi_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_vaapi_device },
5642 "set VAAPI hardware device (DRM path or X11 display name)", "device" },
5643 #endif
5644
5645 #if CONFIG_QSV
5646 { "qsv_device", HAS_ARG | OPT_STRING | OPT_EXPERT, { &qsv_device },
5647 "set QSV hardware device (DirectX adapter index, DRM path or X11 display name)", "device"},
5648 #endif
5649
5650 { "init_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_init_hw_device },
5651 "initialise hardware device", "args" },
5652 { "filter_hw_device", HAS_ARG | OPT_EXPERT, { .func_arg = opt_filter_hw_device },
5653 "set hardware device used when filtering", "device" },
5654
5655 { NULL, },
5656 };
5657
5658 ffmpeg_options = options;
5659
5660 int i, ret;
5662
5663 int savedCode = setjmp(ex_buf__);
5664 if (savedCode == 0) {
5665
5667
5668 init_dynload();
5669
5671
5672 setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
5673
5674 av_log_set_flags(AV_LOG_SKIP_REPEATED);
5675 parse_loglevel(argc, argv, options);
5676
5677 if(argc>1 && !strcmp(argv[1], "-d")){
5678 run_as_daemon=1;
5679 av_log_set_callback(log_callback_null);
5680 argc--;
5681 argv++;
5682 }
5683
5684 #if CONFIG_AVDEVICE
5685 avdevice_register_all();
5686 #endif
5687 avformat_network_init();
5688
5689 show_banner(argc, argv, options);
5690
5691 /* parse options and open all input/output files */
5692 ret = ffmpeg_parse_options(argc, argv);
5693 if (ret < 0)
5694 exit_program(1);
5695
5696 if (nb_output_files <= 0 && nb_input_files == 0) {
5697 show_usage();
5698 av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
5699 exit_program(1);
5700 }
5701
5702 /* file converter / grab */
5703 if (nb_output_files <= 0) {
5704 av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
5705 exit_program(1);
5706 }
5707
5708 for (i = 0; i < nb_output_files; i++) {
5709 if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
5710 want_sdp = 0;
5711 }
5712
5714 if (transcode() < 0)
5715 exit_program(1);
5716 if (do_benchmark) {
5717 int64_t utime, stime, rtime;
5719 utime = current_time.user_usec - ti.user_usec;
5720 stime = current_time.sys_usec - ti.sys_usec;
5721 rtime = current_time.real_usec - ti.real_usec;
5722 av_log(NULL, AV_LOG_INFO,
5723 "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
5724 utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
5725 }
5726 av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
5729 exit_program(69);
5730
5732
5733 } else {
5735 }
5736
5738}
__thread jmp_buf ex_buf__
void exit_program(int ret)
int show_decoders(void *optctx, const char *opt, const char *arg)
int opt_loglevel(void *optctx, const char *opt, const char *arg)
int opt_cpuflags(void *optctx, const char *opt, const char *arg)
void init_dynload(void)
int show_help(void *optctx, const char *opt, const char *arg)
void print_error(const char *filename, int err)
int show_filters(void *optctx, const char *opt, const char *arg)
int show_sample_fmts(void *optctx, const char *opt, const char *arg)
int show_muxers(void *optctx, const char *opt, const char *arg)
int show_bsfs(void *optctx, const char *opt, const char *arg)
__thread char * program_name
int show_layouts(void *optctx, const char *opt, const char *arg)
int show_encoders(void *optctx, const char *opt, const char *arg)
int show_version(void *optctx, const char *opt, const char *arg)
void parse_loglevel(int argc, char **argv, const OptionDef *options)
__thread int program_birth_year
int opt_cpucount(void *optctx, const char *opt, const char *arg)
void show_banner(int argc, char **argv, const OptionDef *options)
int opt_timelimit(void *optctx, const char *opt, const char *arg)
int show_license(void *optctx, const char *opt, const char *arg)
int show_codecs(void *optctx, const char *opt, const char *arg)
int show_buildconf(void *optctx, const char *opt, const char *arg)
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
void register_exit(void(*cb)(int ret))
int show_devices(void *optctx, const char *opt, const char *arg)
void uninit_opts(void)
int show_formats(void *optctx, const char *opt, const char *arg)
__thread int hide_banner
int show_protocols(void *optctx, const char *opt, const char *arg)
int opt_max_alloc(void *optctx, const char *opt, const char *arg)
int opt_report(void *optctx, const char *opt, const char *arg)
int show_colors(void *optctx, const char *opt, const char *arg)
int show_pix_fmts(void *optctx, const char *opt, const char *arg)
int show_demuxers(void *optctx, const char *opt, const char *arg)
#define OPT_VIDEO
#define OPT_SPEC
#define OPT_BOOL
#define media_type_string
#define OPT_INT64
#define OPT_PERFILE
#define OPT_INT
#define OPT_FLOAT
#define AV_LOG_STDERR
#define OPT_INPUT
#define OPT_DOUBLE
#define OPT_STRING
__thread int find_stream_info
#define OPT_AUDIO
#define OPT_DATA
#define OPT_SUBTITLE
#define OPT_EXPERT
#define OPT_EXIT
#define OPT_OUTPUT
#define OPT_TIME
#define OPT_OFFSET
#define HAS_ARG
static InputStream * get_input_stream(OutputStream *ost)
int opt_channel_layout(void *optctx, const char *opt, const char *arg)
__thread unsigned dup_warning
int opt_sdp_file(void *optctx, const char *opt, const char *arg)
static int transcode(void)
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
int opt_timecode(void *optctx, const char *opt, const char *arg)
__thread OptionDef * ffmpeg_options
int opt_vstats_file(void *optctx, const char *opt, const char *arg)
__thread InputStream ** input_streams
static void set_tty_echo(int on)
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
__thread const AVIOInterruptCB int_cb
__thread int run_as_daemon
static int check_keyboard_interaction(int64_t cur_time)
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
int opt_data_codec(void *optctx, const char *opt, const char *arg)
int opt_streamid(void *optctx, const char *opt, const char *arg)
__thread int nb_input_streams
static int need_output(void)
void term_exit(void)
static volatile int received_sigterm
const char *const forced_keyframes_const_names[]
void cancelSession(long sessionId)
Definition: ffmpegkit.c:408
int opt_qscale(void *optctx, const char *opt, const char *arg)
int opt_sameq(void *optctx, const char *opt, const char *arg)
__thread OutputStream ** output_streams
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
__thread OutputFile ** output_files
int opt_filter_complex_script(void *optctx, const char *opt, const char *arg)
static void forward_report(int is_last_report, int64_t timer_start, int64_t cur_time)
int opt_filter_complex(void *optctx, const char *opt, const char *arg)
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
int opt_vsync(void *optctx, const char *opt, const char *arg)
static int init_input_stream(int ist_index, char *error, int error_len)
__thread int nb_output_streams
static void sub2video_push_ref(InputStream *ist, int64_t pts)
int guess_input_channel_layout(InputStream *ist)
__thread volatile int longjmp_value
static void print_sdp(void)
__thread int nb_frames_dup
static int reap_filters(int flush)
static int check_recording_time(OutputStream *ost)
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture)
__thread BenchmarkTimeStamps current_time
__thread int nb_input_files
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
static void print_final_stats(int64_t total_size)
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
__thread int first_report
__thread int nb_output_files
static double psnr(double d)
static int init_output_bsfs(OutputStream *ost)
volatile int handleSIGINT
Definition: ffmpegkit.c:100
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
int opt_subtitle_codec(void *optctx, const char *opt, const char *arg)
volatile int handleSIGTERM
Definition: ffmpegkit.c:101
int opt_video_standard(void *optctx, const char *opt, const char *arg)
void set_report_callback(void(*callback)(int, float, float, int64_t, int, double, double))
int opt_profile(void *optctx, const char *opt, const char *arg)
struct BenchmarkTimeStamps BenchmarkTimeStamps
static int64_t getmaxrss(void)
int opt_abort_on(void *optctx, const char *opt, const char *arg)
static void reset_eagain(void)
__thread int copy_unknown_streams
static void finish_output_stream(OutputStream *ost)
int opt_video_codec(void *optctx, const char *opt, const char *arg)
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
int opt_data_frames(void *optctx, const char *opt, const char *arg)
int opt_video_filters(void *optctx, const char *opt, const char *arg)
static int compare_int64(const void *a, const void *b)
__thread int input_sync
__thread volatile int ffmpeg_exited
int opt_attach(void *optctx, const char *opt, const char *arg)
int opt_filter_hw_device(void *optctx, const char *opt, const char *arg)
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame, unsigned int fatal)
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
__thread atomic_int transcode_init_done
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
static void check_decode_result(InputStream *ist, int *got_output, int ret)
__thread int64_t keyboard_last_time
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
int opt_audio_frames(void *optctx, const char *opt, const char *arg)
static int process_input(int file_index)
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
__thread int no_file_overwrite
int opt_target(void *optctx, const char *opt, const char *arg)
__thread int qp_histogram[52]
static int check_output_constraints(InputStream *ist, OutputStream *ost)
__thread int file_overwrite
int opt_map(void *optctx, const char *opt, const char *arg)
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
void cancel_operation(long id)
static void set_encoder_id(OutputFile *of, OutputStream *ost)
__thread AVIOContext * progress_avio
__thread int64_t copy_ts_first_pts
__thread InputFile ** input_files
static int read_key(void)
static int check_init_output_file(OutputFile *of, int file_index)
static void close_output_stream(OutputStream *ost)
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
int opt_vstats(void *optctx, const char *opt, const char *arg)
int opt_video_channel(void *optctx, const char *opt, const char *arg)
__thread FilterGraph ** filtergraphs
static int init_output_stream(OutputStream *ost, AVFrame *frame, char *error, int error_len)
static int got_eagain(void)
int opt_video_frames(void *optctx, const char *opt, const char *arg)
__thread unsigned nb_output_dumped
static int send_filter_eof(InputStream *ist)
int decode_interrupt_cb(void *ctx)
int opt_audio_filters(void *optctx, const char *opt, const char *arg)
static void term_exit_sigsafe(void)
static void sub2video_flush(InputStream *ist)
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
void remove_avoptions(AVDictionary **a, AVDictionary *b)
static int transcode_init(void)
static volatile int received_nb_signals
int opt_audio_qscale(void *optctx, const char *opt, const char *arg)
static FILE * vstats_file
void(* report_callback)(int, float, float, int64_t, int, double, double)
static void report_new_stream(int input_index, AVPacket *pkt)
__thread int64_t decode_error_stat[2]
__thread volatile int main_ffmpeg_return_code
static void abort_codec_experimental(const AVCodec *c, int encoder)
int show_hwaccels(void *optctx, const char *opt, const char *arg)
static void update_benchmark(const char *fmt,...)
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
static void ffmpeg_cleanup(int ret)
int opt_preset(void *optctx, const char *opt, const char *arg)
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost, AVFrame *frame)
static int transcode_step(void)
static int ifilter_has_all_input_formats(FilterGraph *fg)
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
static OutputStream * choose_output(void)
int opt_old2new(void *optctx, const char *opt, const char *arg)
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
__thread int nb_filtergraphs
static int init_output_stream_streamcopy(OutputStream *ost)
void term_init(void)
int opt_stats_period(void *optctx, const char *opt, const char *arg)
int cancelRequested(long sessionId)
Definition: ffmpegkit.c:418
__thread int64_t last_time
volatile int handleSIGPIPE
Definition: ffmpegkit.c:103
static int get_input_packet(InputFile *f, AVPacket **pkt)
#define OFFSET(x)
int opt_bitrate(void *optctx, const char *opt, const char *arg)
volatile int handleSIGXCPU
Definition: ffmpegkit.c:102
static void do_video_stats(OutputStream *ost, int frame_size)
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
__thread uint8_t * subtitle_out
static int sub2video_get_blank_frame(InputStream *ist)
static void flush_encoders(void)
__thread int do_psnr
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
int opt_map_channel(void *optctx, const char *opt, const char *arg)
int opt_progress(void *optctx, const char *opt, const char *arg)
int opt_audio_codec(void *optctx, const char *opt, const char *arg)
int opt_recording_timestamp(void *optctx, const char *opt, const char *arg)
void assert_avoptions(AVDictionary *m)
__thread int want_sdp
void ffmpeg_var_cleanup()
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
int ffmpeg_execute(int argc, char **argv)
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
__thread volatile long globalSessionId
Definition: ffmpegkit.c:106
__thread int ignore_unknown_streams
static void sigterm_handler(int sig)
__thread int intra_only
volatile int handleSIGQUIT
Definition: ffmpegkit.c:99
int opt_init_hw_device(void *optctx, const char *opt, const char *arg)
__thread int nb_frames_drop
__thread float dts_delta_threshold
int hw_device_setup_for_encode(OutputStream *ost)
__thread int copy_tb
@ HWACCEL_GENERIC
@ HWACCEL_AUTO
__thread int frame_bits_per_raw_sample
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
#define VSYNC_DROP
__thread int64_t stats_period
__thread char * sdp_filename
__thread int print_stats
__thread int video_sync_method
__thread int filter_complex_nbthreads
__thread int abort_on_flags
__thread int audio_volume
__thread float max_error_rate
__thread int copy_ts
__thread int stdin_interaction
__thread float dts_error_threshold
int hwaccel_decode_init(AVCodecContext *avctx)
OSTFinished
@ ENCODER_FINISHED
@ MUXER_FINISHED
#define VSYNC_CFR
__thread int filter_nbthreads
void show_usage(void)
#define DECODING_FOR_FILTER
__thread int do_benchmark
__thread float frame_drop_threshold
__thread int vstats_version
__thread char * vstats_filename
__thread int do_deinterlace
int hw_device_setup_for_decode(InputStream *ist)
#define VSYNC_AUTO
void hw_device_free_all(void)
__thread int audio_sync_method
__thread float audio_drift_threshold
__thread int do_benchmark_all
__thread int start_at_zero
@ FKF_PREV_FORCED_N
@ FKF_T
@ FKF_PREV_FORCED_T
@ FKF_N_FORCED
@ FKF_N
__thread int exit_on_error
int ffmpeg_parse_options(int argc, char **argv)
#define ABORT_ON_FLAG_EMPTY_OUTPUT
#define DECODING_FOR_OST
__thread int qp_hist
#define VSYNC_VSCFR
int filtergraph_is_simple(FilterGraph *fg)
#define VSYNC_PASSTHROUGH
int configure_filtergraph(FilterGraph *fg)
__thread int do_hex_dump
__thread int do_pkt_dump
const HWAccel hwaccels[]
#define VSYNC_VFR
void dump_attachment(AVStream *st, const char *filename)
__thread int debug_ts
__thread char * videotoolbox_pixfmt
__thread int auto_conversion_filters
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
fg inputs[0] ist
OutputStream * ost
ist filters[ist->nb_filters - 1]
ost filter
fg outputs[0] format
fg outputs[0] graph
OutputFilter ** outputs
const char * graph_desc
AVFilterGraph * graph
InputFilter ** inputs
enum HWAccelID id
int(* init)(AVCodecContext *s)
const char * name
AVPacket * pkt
int64_t ts_offset
int64_t duration
AVFormatContext * ctx
int64_t input_ts_offset
int64_t recording_time
AVRational time_base
int nb_streams_warn
int64_t last_ts
float readrate
int64_t start_time
AVBufferRef * hw_frames_ctx
uint8_t * name
struct InputStream * ist
AVFilterContext * filter
enum AVMediaType type
AVFifoBuffer * frame_queue
uint64_t channel_layout
struct FilterGraph * graph
AVRational sample_aspect_ratio
unsigned int initialize
marks if sub2video_update should force an initialization
AVFrame * decoded_frame
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
AVCodecContext * dec_ctx
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
struct InputStream::@2 prev_sub
uint64_t data_size
int64_t next_dts
AVPacket * pkt
struct InputStream::sub2video sub2video
AVStream * st
InputFilter ** filters
AVSubtitle subtitle
const AVCodec * dec
uint64_t limit_filesize
AVFormatContext * ctx
int64_t start_time
start time in microseconds == AV_TIME_BASE units
AVDictionary * opts
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
AVFilterInOut * out_tmp
struct OutputStream * ost
uint64_t * channel_layouts
AVFilterContext * filter
uint8_t * name
struct FilterGraph * graph
int max_muxing_queue_size
AVDictionary * swr_opts
int copy_initial_nonkeyframes
int64_t last_mux_dts
AVRational mux_timebase
double forced_keyframes_expr_const_values[FKF_NB]
OSTFinished finished
int * audio_channels_map
AVPacket * pkt
AVRational frame_aspect_ratio
double rotate_override_value
AVFrame * last_frame
const AVCodec * enc
int audio_channels_mapped
int64_t sync_opts
int64_t * forced_kf_pts
int64_t error[4]
uint64_t packets_written
uint64_t frames_encoded
int64_t max_frames
size_t muxing_queue_data_threshold
AVDictionary * resample_opts
AVRational max_frame_rate
AVRational enc_timebase
AVFifoBuffer * muxing_queue
AVCodecParameters * ref_par
char * forced_keyframes
AVFrame * filtered_frame
const char * attachment_filename
AVRational frame_rate
AVCodecContext * enc_ctx
struct InputStream * sync_ist
AVDictionary * encoder_opts
uint64_t data_size
AVStream * st
char * filters
filtergraph associated to the -filter option
int64_t forced_kf_ref_pts
uint64_t samples_encoded
char * filters_script
filtergraph script associated to the -filter_script option
AVBSFContext * bsf_ctx
int64_t first_pts
AVDictionary * sws_dict
OutputFilter * filter
char * disposition
AVExpr * forced_keyframes_pexpr
size_t muxing_queue_data_size
int last_nb0_frames[3]
char * logfile_prefix