1 : /*
2 : * GPAC - Multimedia Framework C SDK
3 : *
4 : * Authors: Jean Le Feuvre
5 : * Copyright (c) Telecom Paris 2019
6 : * All rights reserved
7 : *
8 : * This file is part of GPAC / ffmpeg avfilter filter
9 : *
10 : * GPAC is free software; you can redistribute it and/or modify
11 : * it under the terms of the GNU Lesser General Public License as published by
12 : * the Free Software Foundation; either version 2, or (at your option)
13 : * any later version.
14 : *
15 : * GPAC is distributed in the hope that it will be useful,
16 : * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 : * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 : * GNU Lesser General Public License for more details.
19 : *
20 : * You should have received a copy of the GNU Lesser General Public
21 : * License along with this library; see the file COPYING. If not, write to
22 : * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 : *
24 : */
25 :
26 : #include <gpac/setup.h>
27 :
28 : #ifdef GPAC_HAS_FFMPEG
29 :
30 : #include "ff_common.h"
31 : #include <gpac/network.h>
32 : #include <libavfilter/avfilter.h>
33 : #include <libavfilter/buffersrc.h>
34 : #include <libavfilter/buffersink.h>
35 :
36 : #if (LIBAVFILTER_VERSION_MAJOR < 7)
37 : #undef GPAC_HAS_FFMPEG
38 : #endif
39 :
40 : #endif
41 :
42 : #ifdef GPAC_HAS_FFMPEG
43 :
44 : typedef struct
45 : {
46 : AVFilterContext *io_filter_ctx;
47 : GF_FilterPid *io_pid;
48 : u32 timescale, pfmt, width, height, sr, nb_ch, bps;
49 : Bool planar;
50 : u64 ch_layout;
51 : GF_Fraction sar;
52 : u32 stride, stride_uv, nb_planes;
53 : //output only
54 : Bool is_video;
55 : u32 gf_pfmt, out_size, uv_height, uv_width, tb_num;
56 : } GF_FFAVPid;
57 :
58 :
59 : typedef struct
60 : {
61 : //options
62 : u32 pfmt, afmt, sr, ch;
63 : Bool dump;
64 :
65 : //internal
66 :
67 : GF_List *ipids;
68 : GF_List *opids;
69 :
70 : AVFilterGraph *filter_graph;
71 : char *filter_desc;
72 :
73 : GF_FilterCapability filter_caps[7];
74 : //0: not loaded, 1: graph config requested but graph not loaded, 2: graph loaded
75 : u32 configure_state;
76 : u32 nb_v_out, nb_a_out, nb_inputs;
77 :
78 : AVFilterInOut *outputs;
79 : AVFrame *frame;
80 :
81 : //0: no flush, 1: graph flush (push EOS in input), 2: wait for EOS in output
82 : u32 flush_state;
83 : GF_Err in_error;
84 : } GF_FFAVFilterCtx;
85 :
86 :
87 : static void ffavf_reset_graph(GF_FFAVFilterCtx *ctx)
88 : {
89 : if (ctx->outputs) {
90 : avfilter_inout_free(&ctx->outputs);
91 : ctx->outputs = NULL;
92 : }
93 : if (ctx->filter_graph) {
94 : avfilter_graph_free(&ctx->filter_graph);
95 : ctx->filter_graph = NULL;
96 : }
97 : }
98 :
99 :
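/* create the libavfilter input for a GPAC PID: a `buffer` source for video PIDs or an `abuffer`
source for audio PIDs, with arguments derived from the PID format (size/pixel format/timebase,
or sample rate/sample format/channel layout) */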
100 : static GF_Err ffavf_setup_input(GF_FFAVFilterCtx *ctx, GF_FFAVPid *avpid)
101 : {
102 : int ret;
103 : char args[1024];
104 : const AVFilter *avf = NULL;
105 : const char *pid_name = gf_filter_pid_get_name(avpid->io_pid);
106 :
107 : if (avpid->width) {
108 : avf = avfilter_get_by_name("buffer");
109 : snprintf(args, sizeof(args),
110 : "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
111 : avpid->width, avpid->height, avpid->pfmt, 1, avpid->timescale, avpid->sar.num, avpid->sar.den);
112 : } else {
113 : avf = avfilter_get_by_name("abuffer");
114 : snprintf(args, sizeof(args),
115 : "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x"LLU,
116 : 1, avpid->timescale, avpid->sr, av_get_sample_fmt_name(avpid->pfmt), avpid->ch_layout);
117 : }
118 : avpid->io_filter_ctx = NULL;
119 : ret = avfilter_graph_create_filter(&avpid->io_filter_ctx, avf, pid_name, args, NULL, ctx->filter_graph);
120 : if (ret<0) {
121 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Failed to create buffer source filter: %s\n", av_err2str(ret) ));
122 : return GF_BAD_PARAM;
123 : }
124 : return GF_OK;
125 : }
126 :
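/* create (or reuse) one GPAC output PID per graph output and bind it to a `buffersink` (video)
or `abuffersink` (audio) filter; the pfmt/afmt/sr/ch options, when set, are forced on the sinks */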
127 : static GF_Err ffavf_setup_outputs(GF_Filter *filter, GF_FFAVFilterCtx *ctx)
128 : {
129 : AVFilterInOut *io;
130 : u32 i, nb_outputs;
131 : int ret;
132 :
133 : //outputs are configured
134 : if (ctx->outputs) return GF_OK;
135 :
136 : //allocate output pids
137 : ctx->outputs = avfilter_inout_alloc();
138 : io = ctx->outputs;
139 : nb_outputs = ctx->nb_v_out + ctx->nb_a_out;
140 : for (i=0; i<nb_outputs; i++) {
141 : u32 k;
142 : char szName[20];
143 : const AVFilter *avf = NULL;
144 : GF_FFAVPid *opid = NULL;
145 : Bool is_video = i<ctx->nb_v_out ? GF_TRUE : GF_FALSE;
146 :
147 : for (k=0; k<gf_list_count(ctx->opids); k++) {
148 : opid = gf_list_get(ctx->opids, k);
149 : if (opid->is_video && is_video) break;
150 : if (!opid->is_video && !is_video) break;
151 : opid = NULL;
152 : }
153 : if (!opid) {
154 : GF_SAFEALLOC(opid, GF_FFAVPid);
155 : if (!opid) continue;
156 :
157 : gf_list_add(ctx->opids, opid);
158 : opid->io_pid = gf_filter_pid_new(filter);
159 : opid->is_video = i<ctx->nb_v_out ? GF_TRUE : GF_FALSE;
160 : //remove properties since we may have changed the format
161 : if (is_video) {
162 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_STREAM_TYPE, &PROP_UINT(GF_STREAM_VISUAL));
163 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_SAMPLE_RATE, NULL);
164 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_CHANNEL_LAYOUT, NULL);
165 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_NUM_CHANNELS, NULL);
166 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_AUDIO_BPS, NULL);
167 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_AUDIO_FORMAT, NULL);
168 : } else {
169 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_STREAM_TYPE, &PROP_UINT(GF_STREAM_AUDIO));
170 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_WIDTH, NULL);
171 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_HEIGHT, NULL);
172 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_PIXFMT, NULL);
173 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_FPS, NULL);
174 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_STRIDE, NULL);
175 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_STRIDE_UV, NULL);
176 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_SAR, NULL);
177 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_COLR_MX, NULL);
178 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_COLR_RANGE, NULL);
179 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_COLR_SPACE, NULL);
180 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_COLR_TRANSFER, NULL);
181 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_COLR_PRIMARIES, NULL);
182 : }
183 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_CODECID, &PROP_UINT(GF_CODECID_RAW));
184 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_FILE_EXT, NULL);
185 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_MIME, NULL);
186 : }
187 : opid->io_filter_ctx = NULL;
188 :
189 : if (opid->is_video) {
190 : sprintf(szName, "vout%d", i+1);
191 : avf = avfilter_get_by_name("buffersink");
192 : } else {
193 : sprintf(szName, "aout%d", (i-ctx->nb_v_out) + 1);
194 : avf = avfilter_get_by_name("abuffersink");
195 : }
196 :
197 : if (nb_outputs==1)
198 : sprintf(szName, "out");
199 :
200 : ret = avfilter_graph_create_filter(&opid->io_filter_ctx, avf, szName, NULL, NULL, ctx->filter_graph);
201 : if (ret<0) {
202 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Failed to create %s filter: %s\n", avf->name, av_err2str(ret) ));
203 : return GF_BAD_PARAM;
204 : }
205 : if (opid->is_video) {
206 : if (ctx->pfmt) {
207 : enum AVPixelFormat pfmt = ffmpeg_pixfmt_from_gpac(ctx->pfmt);
208 : ret = av_opt_set_bin(opid->io_filter_ctx, "pix_fmts", (uint8_t*)&pfmt, sizeof(pfmt), AV_OPT_SEARCH_CHILDREN);
209 : if (ret < 0) {
210 : GF_LOG(GF_LOG_WARNING, GF_LOG_MEDIA, ("[FFAVF] Failed to set %s pixel format: %s\n", avf->name, av_err2str(ret) ));
211 : }
212 : }
213 : } else {
214 : if (ctx->afmt) {
215 : enum AVSampleFormat afmt = ffmpeg_audio_fmt_from_gpac(ctx->afmt);
216 : ret = av_opt_set_bin(opid->io_filter_ctx, "sample_fmts", (uint8_t*)&afmt, sizeof(afmt), AV_OPT_SEARCH_CHILDREN);
217 : if (ret < 0) {
218 : GF_LOG(GF_LOG_WARNING, GF_LOG_MEDIA, ("[FFAVF] Failed to set %s audio format: %s\n", avf->name, av_err2str(ret) ));
219 : }
220 : }
221 : if (ctx->sr) {
222 : ret = av_opt_set_bin(opid->io_filter_ctx, "sample_rates", (uint8_t*)&ctx->sr, sizeof(ctx->sr), AV_OPT_SEARCH_CHILDREN);
223 : if (ret < 0) {
224 : GF_LOG(GF_LOG_WARNING, GF_LOG_MEDIA, ("[FFAVF] Failed to set %s audio sample rate: %s\n", avf->name, av_err2str(ret) ));
225 : }
226 : }
227 : if (ctx->ch) {
228 : ret = av_opt_set_bin(opid->io_filter_ctx, "channels", (uint8_t*)&ctx->ch, sizeof(ctx->ch), AV_OPT_SEARCH_CHILDREN);
229 : if (ret < 0) {
230 : GF_LOG(GF_LOG_WARNING, GF_LOG_MEDIA, ("[FFAVF] Failed to set %s channel count: %s\n", avf->name, av_err2str(ret) ));
231 : }
232 : }
233 :
234 : }
235 : io->name = av_strdup(szName);
236 : io->filter_ctx = opid->io_filter_ctx;
237 : io->pad_idx = 0;
238 : io->next = NULL;
239 : if (i+1==nb_outputs) break;
240 : io->next = avfilter_inout_alloc();
241 : io = io->next;
242 : }
243 : return GF_OK;
244 : }
245 :
246 :
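/* rebuild the graph after an input format change: drop the current graph, re-create the buffer
sources for all input PIDs and re-bind the output sinks */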
247 : static GF_Err ffavf_reconfigure_graph(GF_Filter *filter, GF_FFAVFilterCtx *ctx)
248 : {
249 : u32 i, count;
250 : GF_Err e = GF_OK;
251 : ctx->flush_state = 0;
252 : ffavf_reset_graph(ctx);
253 : ctx->filter_graph = avfilter_graph_alloc();
254 :
255 : count = gf_list_count(ctx->ipids);
256 : for (i=0; i<count; i++) {
257 : GF_FFAVPid *ipid = gf_list_get(ctx->ipids, i);
258 : e = ffavf_setup_input(ctx, ipid);
259 : if (e) break;
260 : }
261 : ctx->configure_state = 2;
262 : if (!e)
263 : e = ffavf_setup_outputs(filter, ctx);
264 :
265 : if (e) ctx->in_error = e;
266 : return e;
267 : }
268 :
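/* probe the filter description once to count audio/video inputs and outputs, then free that probe
graph and allocate an empty one; the counts drive the filter caps override and the number of extra
input PIDs accepted */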
269 : static GF_Err ffavf_initialize(GF_Filter *filter)
270 : {
271 : u32 nb_v_in=0, nb_a_in=0;
272 : u32 i;
273 : AVFilterInOut *inputs;
274 : AVFilterInOut *outputs;
275 : AVFilterInOut *io;
276 : int ret;
277 : Bool dyn_inputs = GF_FALSE;
278 : GF_FFAVFilterCtx *ctx = (GF_FFAVFilterCtx *) gf_filter_get_udta(filter);
279 : if (!ctx->filter_desc) {
280 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Missing filter graph description, cannot load\n"));
281 : return GF_BAD_PARAM;
282 : }
283 :
284 : ctx->ipids = gf_list_new();
285 : ctx->opids = gf_list_new();
286 : ctx->frame = av_frame_alloc();
287 :
288 :
289 : ctx->filter_graph = avfilter_graph_alloc();
290 : ret = avfilter_graph_parse2(ctx->filter_graph, ctx->filter_desc, &inputs, &outputs);
291 : if (ret<0) {
292 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Failed to parse filter description: %s\nFilter description was %s\n", av_err2str(ret), ctx->filter_desc));
293 : return GF_BAD_PARAM;
294 : }
295 :
296 : ctx->nb_inputs=0;
297 : io = inputs;
298 : while (io) {
299 : if (io->filter_ctx->filter->flags & AVFILTER_FLAG_DYNAMIC_INPUTS)
300 : dyn_inputs = GF_TRUE;
301 :
302 : enum AVMediaType mt = avfilter_pad_get_type(io->filter_ctx->input_pads, io->pad_idx);
303 : u32 streamtype = ffmpeg_stream_type_to_gpac(mt);
304 :
305 : switch (streamtype) {
306 : case GF_STREAM_VISUAL: nb_v_in++; break;
307 : case GF_STREAM_AUDIO: nb_a_in++; break;
308 : }
309 : ctx->nb_inputs++;
310 : io = io->next;
311 : }
312 :
313 : ctx->nb_v_out = ctx->nb_a_out = 0;
314 : io = outputs;
315 : while (io) {
316 : for (i=0; i<io->filter_ctx->nb_outputs; i++) {
317 : enum AVMediaType mt = avfilter_pad_get_type(io->filter_ctx->output_pads, i);
318 : u32 streamtype = ffmpeg_stream_type_to_gpac(mt);
319 : switch (streamtype) {
320 : case GF_STREAM_VISUAL: ctx->nb_v_out++; break;
321 : case GF_STREAM_AUDIO: ctx->nb_a_out++; break;
322 : }
323 : }
324 : io = io->next;
325 : }
326 : avfilter_inout_free(&inputs);
327 : avfilter_inout_free(&outputs);
328 : avfilter_graph_free(&ctx->filter_graph);
329 : ctx->filter_graph = avfilter_graph_alloc();
330 :
331 : if (dyn_inputs) {
332 : gf_filter_set_max_extra_input_pids(filter, -1);
333 : } else if (nb_v_in + nb_a_in > 1)
334 : gf_filter_set_max_extra_input_pids(filter, nb_v_in + nb_a_in - 1);
335 :
336 : /*update filter caps*/
337 : memset(ctx->filter_caps, 0, sizeof(GF_FilterCapability) * 7);
338 : ctx->filter_caps[0].flags = GF_CAPS_INPUT_OUTPUT;
339 : ctx->filter_caps[0].code = GF_PROP_PID_CODECID;
340 : ctx->filter_caps[0].val = PROP_UINT(GF_CODECID_RAW);
341 : i=1;
342 : if (nb_v_in) {
343 : ctx->filter_caps[i].flags = ctx->nb_v_out ? GF_CAPS_INPUT_OUTPUT : GF_CAPS_INPUT;
344 : ctx->filter_caps[i].code = GF_PROP_PID_STREAM_TYPE;
345 : ctx->filter_caps[i].val = PROP_UINT(GF_STREAM_VISUAL);
346 : i++;
347 : }
348 : if (nb_a_in) {
349 : ctx->filter_caps[i].flags = ctx->nb_a_out ? GF_CAPS_INPUT_OUTPUT : GF_CAPS_INPUT;
350 : ctx->filter_caps[i].code = GF_PROP_PID_STREAM_TYPE;
351 : ctx->filter_caps[i].val = PROP_UINT(GF_STREAM_AUDIO);
352 : i++;
353 : }
354 : if (ctx->nb_v_out && !nb_v_in) {
355 : ctx->filter_caps[i].flags = GF_CAPS_OUTPUT;
356 : ctx->filter_caps[i].code = GF_PROP_PID_STREAM_TYPE;
357 : ctx->filter_caps[i].val = PROP_UINT(GF_STREAM_VISUAL);
358 : i++;
359 : }
360 : if (ctx->nb_a_out && !nb_a_in) {
361 : ctx->filter_caps[i].flags = GF_CAPS_OUTPUT;
362 : ctx->filter_caps[i].code = GF_PROP_PID_STREAM_TYPE;
363 : ctx->filter_caps[i].val = PROP_UINT(GF_STREAM_AUDIO);
364 : i++;
365 : }
366 : gf_filter_override_caps(filter, ctx->filter_caps, i);
367 :
368 : return GF_OK;
369 : }
370 :
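/* dump the configured graph to the MEDIA logs at info level when enabled, to stderr otherwise */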
371 : static void ffavf_dump_graph(GF_FFAVFilterCtx *ctx, const char *opt)
372 : {
373 : char *graphdump = avfilter_graph_dump(ctx->filter_graph, opt);
374 :
375 : if (graphdump) {
376 : #ifndef GPAC_DISABLE_LOG
377 : if (gf_log_tool_level_on(GF_LOG_MEDIA, GF_LOG_INFO)) {
378 : GF_LOG(GF_LOG_INFO, GF_LOG_MEDIA, ("[FFAVF] Graph dump:\n%s\n\n", graphdump ));
379 : } else
380 : #endif
381 : fprintf(stderr, "[FFAVF] Graph dump:\n%s\n\n", graphdump);
382 :
383 : av_free(graphdump);
384 : } else {
385 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Failed to dump graph %s\n", ctx->filter_desc));
386 : }
387 :
388 : }
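/* called once a packet is queued on each input PID: name the graph inputs (using the `ffid`
property when present), parse the filter description, connect sources and sinks and validate
the resulting graph */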
389 : static GF_Err ffavf_setup_filter(GF_Filter *filter, GF_FFAVFilterCtx *ctx)
390 : {
391 : int ret;
392 : AVFilterInOut *io, *inputs;
393 : u32 i, count = gf_list_count(ctx->ipids);
394 :
395 : //wait until we have one packet on each input
396 : for (i=0; i<count; i++) {
397 : GF_FFAVPid *pid_ctx = gf_list_get(ctx->ipids, i);
398 : GF_FilterPacket *pck = gf_filter_pid_get_packet(pid_ctx->io_pid);
399 : if (!pck) return GF_OK;
400 : }
401 : ctx->configure_state = 2;
402 :
403 : /*create inputs*/
404 : inputs = avfilter_inout_alloc();
405 : io = inputs;
406 : for (i=0; i<count; i++) {
407 : char szName[20];
408 : GF_FFAVPid *pid_ctx = gf_list_get(ctx->ipids, i);
409 :
410 : if (count==1)
411 : io->name = av_strdup("in");
412 : else {
413 : const GF_PropertyValue *p = gf_filter_pid_get_property_str(pid_ctx->io_pid, "ffid");
414 : if (p && p->value.string) {
415 : io->name = av_strdup(p->value.string);
416 : } else {
417 : if (i)
418 : sprintf(szName, "in%d", i+1);
419 : else
420 : sprintf(szName, "in");
421 : io->name = av_strdup(szName);
422 : GF_LOG(GF_LOG_WARNING, GF_LOG_MEDIA, ("[FFAVF] Multiple inputs for graph but no names assigned to input PIDs (missing ffid property), using %s as default name. Filter linking might fail\n", szName));
423 : }
424 : }
425 : io->filter_ctx = pid_ctx->io_filter_ctx;
426 :
427 : if (i+1==count) break;
428 : io->next = avfilter_inout_alloc();
429 : io = io->next;
430 : }
431 : //our outputs describe the filter graph outputs and our inputs describe the filter graph inputs
432 : //however avfilter_graph_parse_ptr expects:
433 : // inputs: the inputs of the next filter graph to connect to, hence our outputs
434 : // outputs: the outputs of the previous filter graph to connect to, hence our inputs
435 : ret = avfilter_graph_parse_ptr(ctx->filter_graph, ctx->filter_desc, &ctx->outputs, &inputs, NULL);
436 : avfilter_inout_free(&inputs);
437 : if (ret < 0) {
438 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Failed to parse filter description: %s\nFilter description was %s\n", av_err2str(ret), ctx->filter_desc));
439 : return ctx->in_error = GF_BAD_PARAM;
440 : }
441 : ret = avfilter_graph_config(ctx->filter_graph, NULL);
442 : if (ret < 0) {
443 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Failed to validate filter graph: %s\n", av_err2str(ret) ));
444 : return ctx->in_error = GF_BAD_PARAM;
445 : }
446 :
447 : if (ctx->dump)
448 : ffavf_dump_graph(ctx, NULL);
449 : return GF_OK;
450 : }
451 :
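/* main processing: push at most one packet per input PID into its buffer source as an AVFrame,
then drain each buffersink into GPAC packets; also drives EOS propagation and the graph
flush/reconfiguration states */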
452 : static GF_Err ffavf_process(GF_Filter *filter)
453 : {
454 : int ret;
455 : GF_Err e = GF_OK;
456 : u32 i, count, nb_eos;
457 : GF_FFAVFilterCtx *ctx = (GF_FFAVFilterCtx *) gf_filter_get_udta(filter);
458 :
459 : if (ctx->in_error)
460 : return ctx->in_error;
461 :
462 : //graph needs to be loaded
463 : if (ctx->configure_state==1) {
464 : if (gf_filter_connections_pending(filter))
465 : return GF_OK;
466 : if (ctx->nb_inputs > gf_list_count(ctx->ipids))
467 : return GF_OK;
468 : return ffavf_setup_filter(filter, ctx);
469 : }
470 :
471 : //push input
472 : nb_eos = 0;
473 : count = gf_list_count(ctx->ipids);
474 : if (ctx->flush_state==2)
475 : count = 0;
476 :
477 : for (i=0; i<count; i++) {
478 : const u8 *data;
479 : u32 data_size;
480 : Bool frame_ok = GF_FALSE;
481 : GF_FFAVPid *ipid = gf_list_get(ctx->ipids, i);
482 : GF_FilterPacket *pck;
483 : pck = gf_filter_pid_get_packet(ipid->io_pid);
484 :
485 : //config changed at this packet, start flushing the graph
486 : if (ctx->flush_state==1) {
487 : ret = av_buffersrc_add_frame_flags(ipid->io_filter_ctx, NULL, 0);
488 : if (ret<0) {
489 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Failed to flush filter graph: %s\n", av_err2str(ret) ));
490 : }
491 : continue;
492 : }
493 :
494 : if (!pck) {
495 : if (gf_filter_pid_is_eos(ipid->io_pid)) {
496 : ret = av_buffersrc_add_frame_flags(ipid->io_filter_ctx, NULL, 0);
497 : if (ret<0) {
498 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Failed to signal EOS: %s\n", av_err2str(ret) ));
499 : e = GF_SERVICE_ERROR;
500 : }
501 : nb_eos++;
502 : }
503 : continue;
504 : }
505 : data = gf_filter_pck_get_data(pck, &data_size);
506 : if (data) {
507 : ctx->frame->data[0] = (uint8_t *) data;
508 : if (ipid->width) {
509 : ctx->frame->linesize[0] = ipid->stride;
510 : if (ipid->stride_uv) {
511 : ctx->frame->data[1] = ctx->frame->data[0] + ipid->height * ipid->stride;
512 : ctx->frame->linesize[1] = ipid->stride_uv;
513 : if (ipid->nb_planes==3) {
514 : ctx->frame->data[2] = ctx->frame->data[1] + ipid->uv_height * ipid->stride_uv;
515 : ctx->frame->linesize[2] = ipid->stride_uv;
516 : }
517 : }
518 : } else {
519 : ctx->frame->linesize[0] = data_size;
520 : }
521 : frame_ok = GF_TRUE;
522 : } else {
523 : u32 j;
524 : GF_FilterFrameInterface *fifce = gf_filter_pck_get_frame_interface(pck);
525 : if (fifce->get_plane) {
526 : frame_ok = GF_TRUE;
527 : for (j=0; j<ipid->nb_planes; j++) {
528 : e = fifce->get_plane(fifce, j, (const u8 **) &ctx->frame->data[j], &ctx->frame->linesize[j]);
529 : if (e) {
530 : frame_ok = GF_FALSE;
531 : break;
532 : }
533 : }
534 : }
535 : }
536 :
537 : if (frame_ok) {
538 : u64 cts = gf_filter_pck_get_cts(pck);
539 : ctx->frame->pts = cts;
540 : if (ipid->width) {
541 : ctx->frame->width = ipid->width;
542 : ctx->frame->height = ipid->height;
543 : ctx->frame->format = ipid->pfmt;
544 : ctx->frame->sample_aspect_ratio.num = ipid->sar.num;
545 : ctx->frame->sample_aspect_ratio.den = ipid->sar.den;
546 : } else {
547 : ctx->frame->channel_layout = ipid->ch_layout;
548 : ctx->frame->channels = ipid->nb_ch;
549 : ctx->frame->sample_rate = ipid->sr;
550 : ctx->frame->format = ipid->pfmt;
551 : ctx->frame->nb_samples = data_size / ipid->nb_ch / ipid->bps;
552 : if (ipid->planar) {
553 : u32 ch_idx;
554 : for (ch_idx=0; ch_idx<ipid->nb_ch; ch_idx++) {
555 : ctx->frame->extended_data[ch_idx] = (uint8_t *) data + ctx->frame->nb_samples*ipid->bps*ch_idx;
556 : }
557 : }
558 : }
559 : /* push the decoded frame into the filtergraph */
560 : ret = av_buffersrc_add_frame_flags(ipid->io_filter_ctx, ctx->frame, 0);
561 : if (ret < 0) {
562 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Failed to push frame to filtergraph: %s\n", av_err2str(ret) ));
563 : e = GF_SERVICE_ERROR;
564 : break;
565 : }
566 : } else {
567 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Failed to fetch data from frame\n"));
568 : e = GF_SERVICE_ERROR;
569 : break;
570 : }
571 : gf_filter_pid_drop_packet(ipid->io_pid);
572 : }
573 : if (count>nb_eos) nb_eos=0;
574 :
575 : if (ctx->flush_state==1) {
576 : ctx->flush_state = 2;
577 : nb_eos = 0;
578 : }
579 :
580 : //pull output
581 : count = gf_list_count(ctx->opids);
582 : for (i=0; i<count; i++) {
583 : GF_FFAVPid *opid = gf_list_get(ctx->opids, i);
584 : if (!nb_eos && gf_filter_pid_would_block(opid->io_pid)) {
585 : continue;
586 : }
587 :
588 : AVFrame *frame = av_frame_alloc();
589 :
590 : ret = av_buffersink_get_frame(opid->io_filter_ctx, frame);
591 : if (ret < 0) {
592 : if (ret == AVERROR_EOF) {
593 : if (ctx->flush_state==2) {
594 : nb_eos++;
595 : } else if (nb_eos) {
596 : gf_filter_pid_set_eos(opid->io_pid);
597 : }
598 : } else if (ret != AVERROR(EAGAIN)) {
599 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Failed to pull frame from filtergraph: %s\n", av_err2str(ret) ));
600 : e = GF_SERVICE_ERROR;
601 : }
602 : av_frame_free(&frame);
603 : break;
604 : }
605 : if (opid->is_video) {
606 : u8 *buffer;
607 : u32 j;
608 : GF_FilterPacket *pck;
609 : Bool update_props=GF_TRUE;
610 : if (frame->width!=opid->width) {}
611 : else if (frame->height!=opid->height) {}
612 : else if (frame->format != opid->pfmt) {}
613 : else {
614 : update_props = GF_FALSE;
615 : }
616 : if (update_props) {
617 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_WIDTH, &PROP_UINT(frame->width));
618 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_HEIGHT, &PROP_UINT(frame->height));
619 : opid->gf_pfmt = ffmpeg_pixfmt_to_gpac(frame->format);
620 : if (ffmpeg_pixfmt_is_fullrange(frame->format)) {
621 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_COLR_RANGE, &PROP_BOOL(GF_TRUE));
622 : } else {
623 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_COLR_RANGE, NULL);
624 : }
625 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_PIXFMT, &PROP_UINT(opid->gf_pfmt));
626 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_STRIDE, &PROP_UINT(frame->linesize[0]));
627 : if (frame->linesize[1])
628 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_STRIDE_UV, &PROP_UINT(frame->linesize[1]));
629 : else
630 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_STRIDE_UV, NULL);
631 :
632 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_TIMESCALE, &PROP_UINT(opid->io_filter_ctx->inputs[0]->time_base.den) );
633 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_FPS, &PROP_FRAC_INT(opid->io_filter_ctx->inputs[0]->time_base.den, opid->io_filter_ctx->inputs[0]->time_base.num) );
634 :
635 : opid->width = frame->width;
636 : opid->height = frame->height;
637 : opid->pfmt = frame->format;
638 : opid->tb_num = opid->io_filter_ctx->inputs[0]->time_base.num;
639 : opid->stride = 0;
640 : opid->stride_uv = 0;
641 : gf_pixel_get_size_info(opid->gf_pfmt, opid->width, opid->height, &opid->out_size, &opid->stride, &opid->stride_uv, NULL, &opid->uv_height);
642 : if ((opid->gf_pfmt==GF_PIXEL_YUV444) || (opid->gf_pfmt==GF_PIXEL_YUV444_10)) {
643 : opid->uv_width = opid->width;
644 : } else if (opid->uv_height) {
645 : opid->uv_width = opid->width/2;
646 : } else {
647 : opid->uv_width = 0;
648 : }
649 : if (ctx->nb_a_out+ctx->nb_v_out>1) {
650 : gf_filter_pid_set_property_str(opid->io_pid, "ffid", &PROP_STRING(opid->io_filter_ctx->name));
651 : }
652 : }
653 : pck = gf_filter_pck_new_alloc(opid->io_pid, opid->out_size, &buffer);
654 : if (!pck) return GF_OUT_OF_MEM;
655 :
656 : for (j=0; j<opid->height; j++) {
657 : memcpy(buffer + j*opid->stride, frame->data[0] + j*frame->linesize[0], opid->stride);
658 : }
659 : if (frame->linesize[1]) {
660 : buffer += opid->height*opid->stride;
661 : for (j=0; j<opid->uv_height; j++) {
662 : memcpy(buffer + j*opid->stride_uv, frame->data[1] + j*frame->linesize[1], opid->uv_width);
663 : }
664 : }
665 : if (frame->linesize[2]) {
666 : buffer += opid->uv_height*opid->stride_uv;
667 : for (j=0; j<opid->uv_height; j++) {
668 : memcpy(buffer + j*opid->stride_uv, frame->data[2] + j*frame->linesize[2], opid->uv_width);
669 : }
670 : }
671 : if (frame->interlaced_frame)
672 : gf_filter_pck_set_interlaced(pck, frame->top_field_first ? 1 : 2);
673 :
674 : gf_filter_pck_set_sap(pck, GF_FILTER_SAP_1);
675 : gf_filter_pck_set_cts(pck, frame->pts * opid->tb_num);
676 : gf_filter_pck_send(pck);
677 : } else {
678 : u8 *buffer;
679 : u32 j, out_size;
680 : GF_FilterPacket *pck;
681 : Bool update_props=GF_TRUE;
682 : if (frame->sample_rate!=opid->sr) {}
683 : else if (frame->channel_layout!=opid->ch_layout) {}
684 : else if (frame->channels != opid->nb_ch) {}
685 : else if (frame->format != opid->pfmt) {}
686 : else {
687 : update_props = GF_FALSE;
688 : }
689 : if (update_props) {
690 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_SAMPLE_RATE, &PROP_UINT(frame->sample_rate));
691 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_CHANNEL_LAYOUT, &PROP_LONGUINT(frame->channel_layout));
692 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_NUM_CHANNELS, &PROP_UINT(frame->channels));
693 : opid->gf_pfmt = ffmpeg_audio_fmt_to_gpac(frame->format);
694 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_AUDIO_FORMAT, &PROP_UINT(opid->gf_pfmt));
695 : gf_filter_pid_set_property(opid->io_pid, GF_PROP_PID_TIMESCALE, &PROP_UINT(opid->io_filter_ctx->inputs[0]->time_base.den) );
696 :
697 : opid->sr = frame->sample_rate;
698 : opid->ch_layout = frame->channel_layout;
699 : opid->nb_ch = frame->channels;
700 : opid->pfmt = frame->format;
701 : opid->tb_num = opid->io_filter_ctx->inputs[0]->time_base.num;
702 : opid->bps = gf_audio_fmt_bit_depth(opid->gf_pfmt) / 8;
703 : if (ctx->nb_a_out+ctx->nb_v_out>1) {
704 : gf_filter_pid_set_property_str(opid->io_pid, "ffid", &PROP_STRING(opid->io_filter_ctx->name));
705 : }
706 : }
707 : out_size = 0;
708 : for (j=0; j<8; j++) {
709 : if (!frame->linesize[j]) break;
710 : out_size += frame->linesize[j];
711 : }
712 :
713 : pck = gf_filter_pck_new_alloc(opid->io_pid, out_size, &buffer);
714 : if (!pck) return GF_OUT_OF_MEM;
715 :
716 : for (j=0; j<8; j++) {
717 : if (!frame->linesize[j]) break;
718 : memcpy(buffer, frame->extended_data[j], frame->linesize[j]); //copy plane j (extended_data covers planar audio), not always plane 0
719 : buffer += frame->linesize[j];
720 : }
721 : gf_filter_pck_set_sap(pck, GF_FILTER_SAP_1);
722 : gf_filter_pck_set_cts(pck, frame->pts * opid->tb_num);
723 : gf_filter_pck_send(pck);
724 : }
725 : av_frame_free(&frame);
726 : }
727 : if (e) return e;
728 : if (ctx->flush_state==2) {
729 : if (nb_eos<count) return GF_OK;
730 : return ffavf_reconfigure_graph(filter, ctx);
731 : }
732 : if (nb_eos) return GF_EOS;
733 : return GF_OK;
734 : }
735 :
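/* input PID (re)configuration: cache the PID format, detect format changes (which trigger a graph
flush and rebuild) and, when the graph is not yet loaded, create the matching buffer source and
output sinks */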
736 : static GF_Err ffavf_configure_pid(GF_Filter *filter, GF_FilterPid *pid, Bool is_remove)
737 : {
738 : u32 streamtype;
739 : GF_FFAVPid *pid_ctx;
740 : Bool check_recfg = GF_FALSE;
741 : const GF_PropertyValue *p;
742 : GF_Fraction timebase;
743 : GF_FFAVFilterCtx *ctx = (GF_FFAVFilterCtx *) gf_filter_get_udta(filter);
744 :
745 : gf_filter_pid_check_caps(pid);
746 : p = gf_filter_pid_get_property(pid, GF_PROP_PID_STREAM_TYPE);
747 : if (!p) return GF_BAD_PARAM;
748 : streamtype = p->value.uint;
749 :
750 : pid_ctx = gf_filter_pid_get_udta(pid);
751 : if (!pid_ctx) {
752 : GF_SAFEALLOC(pid_ctx, GF_FFAVPid);
753 : if (!pid_ctx) return GF_OUT_OF_MEM;
754 :
755 : pid_ctx->io_pid = pid;
756 : gf_filter_pid_set_udta(pid, pid_ctx);
757 : gf_list_add(ctx->ipids, pid_ctx);
758 : } else {
759 : check_recfg = GF_TRUE;
760 : }
761 :
762 : timebase.num = 1;
763 : p = gf_filter_pid_get_property(pid, GF_PROP_PID_TIMESCALE);
764 : if (!p) return GF_BAD_PARAM;
765 : timebase.den = p->value.uint;
766 :
767 : ctx->in_error = GF_OK;
768 :
769 : if (streamtype==GF_STREAM_VISUAL) {
770 : u32 width, height, pix_fmt, gf_pfmt;
771 : GF_Fraction sar={1,1};
772 : p = gf_filter_pid_get_property(pid, GF_PROP_PID_WIDTH);
773 : if (!p) return GF_OK; //not ready yet
774 : width = p->value.uint;
775 :
776 : p = gf_filter_pid_get_property(pid, GF_PROP_PID_HEIGHT);
777 : if (!p) return GF_OK; //not ready yet
778 : height = p->value.uint;
779 :
780 : p = gf_filter_pid_get_property(pid, GF_PROP_PID_PIXFMT);
781 : if (!p) return GF_OK; //not ready yet
782 : gf_pfmt = p->value.uint;
783 : pix_fmt = ffmpeg_pixfmt_from_gpac(gf_pfmt);
784 :
785 : p = gf_filter_pid_get_property(pid, GF_PROP_PID_SAR);
786 : if (p && p->value.frac.num && p->value.frac.den) sar = p->value.frac;
787 :
788 : pid_ctx->stride = pid_ctx->stride_uv = 0;
789 : p = gf_filter_pid_get_property(pid, GF_PROP_PID_STRIDE);
790 : if (p) pid_ctx->stride = p->value.uint;
791 : p = gf_filter_pid_get_property(pid, GF_PROP_PID_STRIDE_UV);
792 : if (p) pid_ctx->stride_uv = p->value.uint;
793 : gf_pixel_get_size_info(gf_pfmt, width, height, NULL, &pid_ctx->stride, &pid_ctx->stride_uv, &pid_ctx->nb_planes, &pid_ctx->uv_height);
794 :
795 : if (check_recfg) {
796 : check_recfg = GF_FALSE;
797 : if (width!=pid_ctx->width) {}
798 : else if (height!=pid_ctx->height) {}
799 : else if (pix_fmt!=pid_ctx->pfmt) {}
800 : else if (timebase.den!=pid_ctx->timescale) {}
801 : else if (sar.den * pid_ctx->sar.num != sar.num * pid_ctx->sar.den) {}
802 : else {
803 : return GF_OK;
804 : }
805 : }
806 : pid_ctx->width = width;
807 : pid_ctx->height = height;
808 : pid_ctx->pfmt = pix_fmt;
809 : pid_ctx->timescale = timebase.den;
810 : pid_ctx->sar = sar;
811 : } else if (streamtype==GF_STREAM_AUDIO) {
812 : u64 ch_layout=0;
813 : u32 sr, afmt, nb_ch;
814 : p = gf_filter_pid_get_property(pid, GF_PROP_PID_CHANNEL_LAYOUT);
815 : if (p) ch_layout = p->value.longuint;
816 :
817 : p = gf_filter_pid_get_property(pid, GF_PROP_PID_NUM_CHANNELS);
818 : if (!p) return GF_OK; //not ready yet
819 : nb_ch = p->value.uint;
820 : if (!ch_layout) ch_layout = av_get_default_channel_layout(p->value.uint);
821 :
822 : p = gf_filter_pid_get_property(pid, GF_PROP_PID_SAMPLE_RATE);
823 : if (!p) return GF_OK; //not ready yet
824 : sr = p->value.uint;
825 :
826 : p = gf_filter_pid_get_property(pid, GF_PROP_PID_AUDIO_FORMAT);
827 : if (!p) return GF_OK; //not ready yet
828 : afmt = ffmpeg_audio_fmt_from_gpac(p->value.uint);
829 : pid_ctx->bps = gf_audio_fmt_bit_depth(p->value.uint) / 8;
830 : pid_ctx->planar = gf_audio_fmt_is_planar(p->value.uint);
831 :
832 : if (check_recfg) {
833 : if (sr!=pid_ctx->sr) {}
834 : else if (afmt!=pid_ctx->pfmt) {}
835 : else if (nb_ch!=pid_ctx->nb_ch) {}
836 : else if (ch_layout!=pid_ctx->ch_layout) {}
837 : else if (timebase.den!=pid_ctx->timescale) {}
838 : else {
839 : return GF_OK;
840 : }
841 : }
842 : pid_ctx->sr = sr;
843 : pid_ctx->ch_layout = ch_layout;
844 : pid_ctx->pfmt = afmt;
845 : pid_ctx->nb_ch = nb_ch;
846 : pid_ctx->timescale = timebase.den;
847 : } else {
848 : return GF_NOT_SUPPORTED;
849 : }
850 : gf_filter_pid_set_framing_mode(pid, GF_TRUE);
851 :
852 : //we have not yet configured the graph, no need to flush
853 : if (ctx->configure_state==1) {
854 : check_recfg = GF_FALSE;
855 : }
856 :
857 : //pid config change, flush current graph and resetup
858 : if (check_recfg) ctx->flush_state = 1;
859 : //graph has already been loaded, we need to flush/resetup to add the pid
860 : if (ctx->configure_state==2) ctx->flush_state = 1;
861 : //in flush state, either because of this reconfig or a previous one
862 : if (ctx->flush_state)
863 : return GF_OK;
864 :
865 : //setup input connections
866 : ctx->in_error = ffavf_setup_input(ctx, pid_ctx);
867 : if (ctx->in_error) return ctx->in_error;
868 :
869 : //mark graph to be rebuilt
870 : ctx->configure_state = 1;
871 : //setup output connections
872 : ctx->in_error = ffavf_setup_outputs(filter, ctx);
873 : return ctx->in_error;
874 : }
875 :
876 :
877 : static void ffavf_finalize(GF_Filter *filter)
878 : {
879 : GF_FFAVFilterCtx *ctx = (GF_FFAVFilterCtx *) gf_filter_get_udta(filter);
880 :
881 : ffavf_reset_graph(ctx);
882 : while (gf_list_count(ctx->ipids)) {
883 : GF_FFAVPid *ipid = gf_list_pop_back(ctx->ipids);
884 : //io_filter_ctx is destroyed while resetting the graph
885 : gf_free(ipid);
886 : }
887 : gf_list_del(ctx->ipids);
888 : while (gf_list_count(ctx->opids)) {
889 : GF_FFAVPid *opid = gf_list_pop_back(ctx->opids);
890 : //io_filter_ctx is destroyed while resetting the graph
891 : gf_free(opid);
892 : }
893 : gf_list_del(ctx->opids);
894 : if (ctx->filter_desc) gf_free(ctx->filter_desc);
895 : if (ctx->frame) av_frame_free(&ctx->frame);
896 : }
897 :
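/* runtime option updates: `f` is only accepted before the graph is created, `dump` dumps the
current graph, any other option is forwarded to the running graph as an avfilter command
(optionally targeted at a named filter, see filter help) */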
898 : static GF_Err ffavf_update_arg(GF_Filter *filter, const char *arg_name, const GF_PropertyValue *arg_val)
899 : {
900 : int ret;
901 : char *arg_value;
902 :
903 : GF_FFAVFilterCtx *ctx = gf_filter_get_udta(filter);
904 :
905 : if (!strcmp(arg_name, "f")) {
906 : if (ctx->filter_graph) {
907 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Cannot update filter description while running, not supported\n"));
908 : return GF_NOT_SUPPORTED;
909 : }
910 : if (ctx->filter_desc) gf_free(ctx->filter_desc);
911 : ctx->filter_desc = gf_strdup(arg_val->value.string);
912 : return GF_OK;
913 : }
914 :
915 : arg_value = NULL;
916 : if (arg_val->type == GF_PROP_STRING) {
917 : arg_value = arg_val->value.string;
918 : }
919 :
920 : if (!strcmp(arg_name, "dump")) {
921 : ffavf_dump_graph(ctx, (arg_value && strlen(arg_value) ) ? arg_value : NULL);
922 : //do not change the dump value
923 : return GF_NOT_FOUND;
924 : }
925 :
926 : if (!arg_value) {
927 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Failed to set option %s, unrecognized type %d\n", arg_name, arg_val->type ));
928 : return GF_NOT_SUPPORTED;
929 : }
930 :
931 : if (ctx->filter_graph) {
932 : char *arg = (char *) arg_name;
933 : char szTargetName[101];
934 : char szCommandRes[1025];
935 : char *target = strchr(arg_name, gf_filter_get_sep(filter, GF_FS_SEP_FRAG));
936 : if (target) {
937 : u32 len = (u32) (target - arg_name);
938 : if (len>=100) len=100;
939 : strncpy(szTargetName, arg_name, len);
940 : szTargetName[len] = 0;
941 : arg = target+1;
942 : } else {
943 : strcpy(szTargetName, "all");
944 : }
945 : ret = avfilter_graph_send_command(ctx->filter_graph, szTargetName, arg, arg_value, szCommandRes, 1024, 0);
946 : if (ret<0) {
947 : GF_LOG(GF_LOG_ERROR, GF_LOG_MEDIA, ("[FFAVF] Failed to execute command %s: %s\n", arg_name, av_err2str(ret) ));
948 : return GF_BAD_PARAM;
949 : }
950 : return GF_OK;
951 : }
952 : //other options are not allowed, they MUST be passed as part of the `f` option
953 : return GF_NOT_FOUND;
954 : }
955 :
956 :
957 : static const GF_FilterCapability FFAVFilterCaps[] =
958 : {
959 : CAP_UINT(GF_CAPS_INPUT_OUTPUT, GF_PROP_PID_STREAM_TYPE, GF_STREAM_VISUAL),
960 : CAP_UINT(GF_CAPS_INPUT_OUTPUT, GF_PROP_PID_CODECID, GF_CODECID_RAW),
961 : {0},
962 : CAP_UINT(GF_CAPS_INPUT_OUTPUT, GF_PROP_PID_STREAM_TYPE, GF_STREAM_AUDIO),
963 : CAP_UINT(GF_CAPS_INPUT_OUTPUT, GF_PROP_PID_CODECID, GF_CODECID_RAW),
964 : };
965 :
966 : GF_FilterRegister FFAVFilterRegister = {
967 : .name = "ffavf",
968 : .version = LIBAVFORMAT_IDENT,
969 : GF_FS_SET_DESCRIPTION("FFMPEG AVFilter")
970 : GF_FS_SET_HELP("This filter provides libavfilter raw audio and video tools.\n"
971 : "See FFMPEG documentation (https://ffmpeg.org/documentation.html) for more details\n"
972 : "To list all supported avfilters for your GPAC build, use `gpac -h ffavf:*`.\n"
973 : "\n"
974 : "# Declaring a filter\n"
975 : "The filter loads a filter or a filter chain description from the [-f]() option.\n"
976 : "EX ffavf:f=showspectrum\n"
977 : "\n"
978 : "Unlike other FFMPEG bindings in GPAC, this filter does not parse other libavfilter options, you must specify them directly in the filter chain, and the [-f]() option will have to be escaped.\n"
979 : "EX ffavf::f=showspectrum=size=320x320 or ffavf::f=showspectrum=size=320x320::pfmt=rgb\n"
980 : "\n"
981 : "The filter will automatically create `buffer` and `buffersink` AV filters for data exchange between GPAC and libavfilter.\n"
982 : "The builtin options ( [-pfmt](), [-afmt]() ...) can be used to configure the `buffersink` filter to set the output format of the filter.\n"
983 : "\n"
984 : "# Naming of PIDs\n"
985 : "For simple filter graphs with only one input and one output, the input PID is assigned the avfilter name `in` and the output PID is assigned the avfilter name `out`\n"
986 : "\n"
987 : "When a graph has several inputs, input PID names shall be assigned by the user using the `ffid` property, and mapping must be done in the filter.\n"
988 : "EX src=video:#ffid=a src=logo:#ffid=b ffavf::f=[a][b]overlay=main_w-overlay_w-10:main_h-overlay_h-10\n"
989 : "In this example:\n"
990 : "- the video source is identified as `a`\n"
991 : "- the logo source is identified as `b`\n"
992 : "- the filter declaration maps `a` to its first input (in this case, main video) and `b` to its second input (in this case the overlay)\n"
993 : "\n"
994 : "When a graph has several outputs, output PIDs will be identified using the `ffid` property set to the output avfilter name.\n"
995 : "EX src=source ffavf::f=split inspect:SID=#ffid=out0 vout#SID=out1\n"
996 : "In this example:\n"
997 : "- the splitter produces 2 video streams `out0` and `out1`\n"
998 : "- the inspector only process stream with ffid `out0`\n"
999 : "- the video output only displays stream with ffid `out1`\n"
1000 : "\n"
1001 : "The name(s) of the final output of the avfilter graph cannot be configured in GPAC. You can however name intermediate output(s) in a complex filter chain as usual.\n"
1002 : "\n"
1003 : "# Filter graph commands\n"
1004 : "The filter handles option updates as commands passed to the AV filter graph. The syntax expected in the option name is:\n"
1005 : "- com_name=value: sends command `com_name` with value `value` to all filters\n"
1006 : "- name#com_name=value: sends command `com_name` with value `value` to filter named `name`\n"
1007 : "\n"
1008 : )
1009 : .flags = GF_FS_REG_META | GF_FS_REG_EXPLICIT_ONLY,
1010 : .private_size = sizeof(GF_FFAVFilterCtx),
1011 : SETCAPS(FFAVFilterCaps),
1012 : .initialize = ffavf_initialize,
1013 : .finalize = ffavf_finalize,
1014 : .configure_pid = ffavf_configure_pid,
1015 : .process = ffavf_process,
1016 : .update_arg = ffavf_update_arg,
1017 : };
1018 :
1019 : #define OFFS(_n) #_n, offsetof(GF_FFAVFilterCtx, _n)
1020 :
1021 : static const GF_FilterArgs FFAVFilterArgs[] =
1022 : {
1023 : { "f", -1, "filter or filter chain description - see filter help", GF_PROP_STRING, NULL, NULL, GF_FS_ARG_META},
1024 : { OFFS(pfmt), "pixel format of output. If not set, let AVFilter decide", GF_PROP_PIXFMT, "none", NULL, 0},
1025 : { OFFS(afmt), "audio format of output. If not set, let AVFilter decide", GF_PROP_PCMFMT, "none", NULL, 0},
1026 : { OFFS(sr), "sample rate of output. If not set, let AVFilter decide", GF_PROP_UINT, "0", NULL, 0},
1027 : { OFFS(ch), "number of channels of output. If not set, let AVFilter decide", GF_PROP_UINT, "0", NULL, 0},
1028 : { OFFS(dump), "dump the graph to the logs (media@info level) or to stderr if logs are not enabled", GF_PROP_BOOL, "false", NULL, GF_FS_ARG_UPDATE},
1029 : { "*", -1, "any possible options defined for AVFilter and sub-classes. See `gpac -hx ffavf` and `gpac -hx ffavf:*`", GF_PROP_STRING, NULL, NULL, GF_FS_ARG_META},
1030 : {0}
1031 : };
1032 :
1033 : const int FFAVF_STATIC_ARGS = (sizeof (FFAVFilterArgs) / sizeof (GF_FilterArgs)) - 1;
1034 :
1035 : const GF_FilterRegister *ffavf_register(GF_FilterSession *session)
1036 : {
1037 : ffmpeg_build_register(session, &FFAVFilterRegister, FFAVFilterArgs, FFAVF_STATIC_ARGS, FF_REG_TYPE_AVF);
1038 : return &FFAVFilterRegister;
1039 : }
1040 :
1041 : #else
1042 : #include <gpac/filters.h>
1043 : const GF_FilterRegister *ffavf_register(GF_FilterSession *session)
1044 : {
1045 : return NULL;
1046 : }
1047 : #endif
|