|
|
|
@ -983,20 +983,40 @@ int sch_connect(Scheduler *sch, SchedulerNode src, SchedulerNode dst) |
|
|
|
|
} |
|
|
|
|
case SCH_NODE_TYPE_FILTER_OUT: { |
|
|
|
|
SchFilterOut *fo; |
|
|
|
|
SchEnc *enc; |
|
|
|
|
|
|
|
|
|
av_assert0(src.idx < sch->nb_filters && |
|
|
|
|
src.idx_stream < sch->filters[src.idx].nb_outputs); |
|
|
|
|
// filtered frames go to encoding
|
|
|
|
|
av_assert0(dst.type == SCH_NODE_TYPE_ENC && |
|
|
|
|
dst.idx < sch->nb_enc); |
|
|
|
|
fo = &sch->filters[src.idx].outputs[src.idx_stream]; |
|
|
|
|
|
|
|
|
|
fo = &sch->filters[src.idx].outputs[src.idx_stream]; |
|
|
|
|
enc = &sch->enc[dst.idx]; |
|
|
|
|
av_assert0(!fo->dst.type); |
|
|
|
|
fo->dst = dst; |
|
|
|
|
|
|
|
|
|
// filtered frames go to encoding or another filtergraph
|
|
|
|
|
switch (dst.type) { |
|
|
|
|
case SCH_NODE_TYPE_ENC: { |
|
|
|
|
SchEnc *enc; |
|
|
|
|
|
|
|
|
|
av_assert0(dst.idx < sch->nb_enc); |
|
|
|
|
enc = &sch->enc[dst.idx]; |
|
|
|
|
|
|
|
|
|
av_assert0(!enc->src.type); |
|
|
|
|
enc->src = src; |
|
|
|
|
break; |
|
|
|
|
} |
|
|
|
|
case SCH_NODE_TYPE_FILTER_IN: { |
|
|
|
|
SchFilterIn *fi; |
|
|
|
|
|
|
|
|
|
av_assert0(dst.idx < sch->nb_filters && |
|
|
|
|
dst.idx_stream < sch->filters[dst.idx].nb_inputs); |
|
|
|
|
fi = &sch->filters[dst.idx].inputs[dst.idx_stream]; |
|
|
|
|
|
|
|
|
|
av_assert0(!fi->src.type); |
|
|
|
|
fi->src = src; |
|
|
|
|
break; |
|
|
|
|
} |
|
|
|
|
default: av_assert0(0); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
av_assert0(!fo->dst.type && !enc->src.type); |
|
|
|
|
fo->dst = dst; |
|
|
|
|
enc->src = src; |
|
|
|
|
|
|
|
|
|
break; |
|
|
|
|
} |
|
|
|
@ -1351,24 +1371,13 @@ static int check_acyclic(Scheduler *sch) |
|
|
|
|
goto fail; |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
// trace the transcoding graph upstream from every output stream
|
|
|
|
|
// fed by a filtergraph
|
|
|
|
|
for (unsigned i = 0; i < sch->nb_mux; i++) { |
|
|
|
|
SchMux *mux = &sch->mux[i]; |
|
|
|
|
|
|
|
|
|
for (unsigned j = 0; j < mux->nb_streams; j++) { |
|
|
|
|
SchMuxStream *ms = &mux->streams[j]; |
|
|
|
|
SchedulerNode src = ms->src_sched; |
|
|
|
|
|
|
|
|
|
if (src.type != SCH_NODE_TYPE_FILTER_OUT) |
|
|
|
|
continue; |
|
|
|
|
src.idx_stream = 0; |
|
|
|
|
|
|
|
|
|
ret = check_acyclic_for_output(sch, src, filters_visited, filters_stack); |
|
|
|
|
if (ret < 0) { |
|
|
|
|
av_log(mux, AV_LOG_ERROR, "Transcoding graph has a cycle\n"); |
|
|
|
|
goto fail; |
|
|
|
|
} |
|
|
|
|
// trace the transcoding graph upstream from every filtergraph
|
|
|
|
|
for (unsigned i = 0; i < sch->nb_filters; i++) { |
|
|
|
|
ret = check_acyclic_for_output(sch, (SchedulerNode){ .idx = i }, |
|
|
|
|
filters_visited, filters_stack); |
|
|
|
|
if (ret < 0) { |
|
|
|
|
av_log(&sch->filters[i], AV_LOG_ERROR, "Transcoding graph has a cycle\n"); |
|
|
|
|
goto fail; |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
@ -1484,13 +1493,18 @@ static int start_prepare(Scheduler *sch) |
|
|
|
|
"Filtergraph input %u not connected to a source\n", j); |
|
|
|
|
return AVERROR(EINVAL); |
|
|
|
|
} |
|
|
|
|
av_assert0(fi->src.type == SCH_NODE_TYPE_DEC); |
|
|
|
|
dec = &sch->dec[fi->src.idx]; |
|
|
|
|
|
|
|
|
|
switch (dec->src.type) { |
|
|
|
|
case SCH_NODE_TYPE_DEMUX: fi->src_sched = dec->src; break; |
|
|
|
|
case SCH_NODE_TYPE_ENC: fi->src_sched = sch->enc[dec->src.idx].src; break; |
|
|
|
|
default: av_assert0(0); |
|
|
|
|
if (fi->src.type == SCH_NODE_TYPE_FILTER_OUT) |
|
|
|
|
fi->src_sched = fi->src; |
|
|
|
|
else { |
|
|
|
|
av_assert0(fi->src.type == SCH_NODE_TYPE_DEC); |
|
|
|
|
dec = &sch->dec[fi->src.idx]; |
|
|
|
|
|
|
|
|
|
switch (dec->src.type) { |
|
|
|
|
case SCH_NODE_TYPE_DEMUX: fi->src_sched = dec->src; break; |
|
|
|
|
case SCH_NODE_TYPE_ENC: fi->src_sched = sch->enc[dec->src.idx].src; break; |
|
|
|
|
default: av_assert0(0); |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
} |
|
|
|
|
|
|
|
|
@ -2379,12 +2393,17 @@ void sch_filter_receive_finish(Scheduler *sch, unsigned fg_idx, unsigned in_idx) |
|
|
|
|
int sch_filter_send(Scheduler *sch, unsigned fg_idx, unsigned out_idx, AVFrame *frame) |
|
|
|
|
{ |
|
|
|
|
SchFilterGraph *fg; |
|
|
|
|
SchedulerNode dst; |
|
|
|
|
|
|
|
|
|
av_assert0(fg_idx < sch->nb_filters); |
|
|
|
|
fg = &sch->filters[fg_idx]; |
|
|
|
|
|
|
|
|
|
av_assert0(out_idx < fg->nb_outputs); |
|
|
|
|
return send_to_enc(sch, &sch->enc[fg->outputs[out_idx].dst.idx], frame); |
|
|
|
|
dst = fg->outputs[out_idx].dst; |
|
|
|
|
|
|
|
|
|
return (dst.type == SCH_NODE_TYPE_ENC) ? |
|
|
|
|
send_to_enc (sch, &sch->enc[dst.idx], frame) : |
|
|
|
|
send_to_filter(sch, &sch->filters[dst.idx], dst.idx_stream, frame); |
|
|
|
|
} |
|
|
|
|
|
|
|
|
|
static int filter_done(Scheduler *sch, unsigned fg_idx) |
|
|
|
@ -2396,8 +2415,11 @@ static int filter_done(Scheduler *sch, unsigned fg_idx) |
|
|
|
|
tq_receive_finish(fg->queue, i); |
|
|
|
|
|
|
|
|
|
for (unsigned i = 0; i < fg->nb_outputs; i++) { |
|
|
|
|
SchEnc *enc = &sch->enc[fg->outputs[i].dst.idx]; |
|
|
|
|
int err = send_to_enc(sch, enc, NULL); |
|
|
|
|
SchedulerNode dst = fg->outputs[i].dst; |
|
|
|
|
int err = (dst.type == SCH_NODE_TYPE_ENC) ? |
|
|
|
|
send_to_enc (sch, &sch->enc[dst.idx], NULL) : |
|
|
|
|
send_to_filter(sch, &sch->filters[dst.idx], dst.idx_stream, NULL); |
|
|
|
|
|
|
|
|
|
if (err < 0 && err != AVERROR_EOF) |
|
|
|
|
ret = err_merge(ret, err); |
|
|
|
|
} |
|
|
|
|