如果GstAggregator接受三个输入,在aggregate是否会同步? (If a GstAggregator accepts three inputs, does aggregate() synchronize on all of them?)
Function 1: gst_aggregator_check_pads_ready --- the gate
This is the function that checks whether every non-EOS sink pad has a buffer. It returns FALSE if any non-EOS pad is empty:
/* Abridged excerpt from GStreamer's gstaggregator.c.
 * The "gate": returns TRUE only if every sink pad that is not EOS has
 * either a pending clipped buffer or a buffer queued. A single empty
 * non-EOS pad makes the whole element "not ready".
 * NOTE(review): buffers are pushed at the queue HEAD by chain_internal
 * (g_queue_push_head), so the TAIL peeked here is the oldest queued item. */
static gboolean
gst_aggregator_check_pads_ready (GstAggregator * self,
gboolean * have_event_or_query_ret)
{
GstAggregatorPad *pad = NULL;
GList *l, *sinkpads;
gboolean have_buffer = TRUE;
// ... (elided in this excerpt)
GST_OBJECT_LOCK (self);
sinkpads = GST_ELEMENT_CAST (self)->sinkpads;
for (l = sinkpads; l != NULL; l = l->next) { // iterate ALL sink pads
pad = l->data;
PAD_LOCK (pad);
// Check if pad has a clipped buffer or buffer at the consumption end of the queue
if (!pad->priv->clipped_buffer
&& !GST_IS_BUFFER (g_queue_peek_tail (&pad->priv->data))) {
// No buffer on this pad --- is it EOS?
if (!pad->priv->eos) {
GST_LOG_OBJECT (pad, "Have no buffer and not EOS yet");
have_buffer = FALSE; // <--- ANY non-EOS pad without buffer = NOT READY
}
}
PAD_UNLOCK (pad);
}
if (!have_buffer)
goto pad_not_ready; // <--- returns FALSE
// ... (event/query bookkeeping elided)
return TRUE; // only if ALL non-EOS pads have a buffer
pad_not_ready:
GST_LOG_OBJECT (self, "pad not ready to be aggregated yet");
GST_OBJECT_UNLOCK (self);
return FALSE;
}
The logic is straightforward: iterate every sink pad; if any non-EOS pad has no buffer, return FALSE.
Function 2: gst_aggregator_wait_and_check --- the blocker
This function calls check_pads_ready. If pads are not ready, it blocks (SRC_WAIT) until a pad pushes new data:
/* Abridged excerpt from GStreamer's gstaggregator.c.
 * The "blocker": calls check_pads_ready under SRC_LOCK; if not all pads
 * are ready it parks the aggregate thread on SRC_WAIT until a sink pad
 * signals SRC_BROADCAST (or, in the live/clocked branch elided below,
 * until a deadline fires and *timeout is set).
 * NOTE(review): excerpt only --- `have_event_or_query` and `res` are
 * declared in the full source; the `...` in the condition elides the
 * remaining live-pipeline checks. */
static gboolean
gst_aggregator_wait_and_check (GstAggregator * self, gboolean * timeout)
{
GstClockTime latency;
GstClockTime start;
*timeout = FALSE;
SRC_LOCK (self);
latency = gst_aggregator_get_latency_unlocked (self);
if (gst_aggregator_check_pads_ready (self, &have_event_or_query)) {
GST_DEBUG_OBJECT (self, "all pads have data"); // <--- all pads ready, proceed
SRC_UNLOCK (self);
return TRUE;
}
// Not all pads ready yet...
start = gst_aggregator_get_next_time (self); // calls nvdsmetamux's get_next_time()
// KEY BRANCH: for non-live pipelines or when start == GST_CLOCK_TIME_NONE
if (!GST_CLOCK_TIME_IS_VALID (latency) ||
!GST_IS_CLOCK (GST_ELEMENT_CLOCK (self)) ||
!GST_CLOCK_TIME_IS_VALID (start) || // <--- nvdsmetamux returns NONE here!
...) {
SRC_WAIT (self); // <--- BLOCKS until SRC_BROADCAST
}
res = gst_aggregator_check_pads_ready (self, NULL); // re-check after wakeup
SRC_UNLOCK (self);
return res;
}
Because nvdsmetamux returns GST_CLOCK_TIME_NONE from get_next_time, the condition !GST_CLOCK_TIME_IS_VALID(start) is TRUE, so it enters the SRC_WAIT branch --- an unconditional block until some pad pushes data (which calls SRC_BROADCAST).
Function 3: gst_aggregator_loop --- the main loop
This is the aggregate thread's main loop. It calls wait_and_check and only proceeds to aggregate() when it returns TRUE (i.e., all pads ready):
/* Abridged excerpt from GStreamer's gstaggregator.c.
 * Main loop of the aggregate (srcpad task) thread: gate on
 * wait_and_check(), then hand off to the subclass aggregate() vfunc.
 * NOTE(review): excerpt only --- `priv` and `flow_return` are declared
 * in the full source; event/query handling is elided. */
static GstFlowReturn
gst_aggregator_loop (GstAggregator * self)
{
GstAggregatorClass *klass = GST_AGGREGATOR_GET_CLASS (self);
gboolean timeout = FALSE;
while (priv->send_eos && priv->running) {
// ... handle events/queries ...
// GATE: wait until ALL pads have data
if (!gst_aggregator_wait_and_check (self, &timeout)) {
continue; // <--- not ready, loop back and wait again
}
// ALL pads have data --- call subclass aggregate()
if (timeout || flow_return >= GST_FLOW_OK) {
GST_LOG_OBJECT (self, "Actually aggregating, timeout: %d", timeout);
flow_return = klass->aggregate (self, timeout); // <--- calls nvdsmetamux_aggregate()
}
if (flow_return == GST_AGGREGATOR_FLOW_NEED_DATA)
continue; // <--- nvdsmetamux says "need more data", loop back
// ... (EOS / error handling elided)
}
}
Bonus: gst_aggregator_pad_chain_internal --- upstream blocking
When a pad's internal queue already has a buffer and the aggregate thread hasn't consumed it yet, the upstream streaming thread blocks on a condition variable:
/* Abridged excerpt from GStreamer's gstaggregator.c.
 * Backpressure on the upstream streaming thread: if the pad's queue has
 * no space, block on the pad's condition variable (PAD_WAIT_EVENT) until
 * the aggregate thread consumes a buffer; otherwise enqueue at the queue
 * head and wake the aggregate thread via SRC_BROADCAST.
 * NOTE(review): excerpt only --- unlock/return after the loop is elided,
 * which is why this non-void function shows no return here. */
static GstFlowReturn
gst_aggregator_pad_chain_internal (GstAggregator * self,
GstAggregatorPad * aggpad, GstBuffer * buffer, gboolean head)
{
for (;;) {
SRC_LOCK (self);
GST_OBJECT_LOCK (self);
PAD_LOCK (aggpad);
if ((gst_aggregator_pad_has_space (self, aggpad) || !head)
&& aggpad->priv->flow_return == GST_FLOW_OK) {
// Space available --- enqueue buffer and signal aggregate thread
g_queue_push_head (&aggpad->priv->data, buffer);
aggpad->priv->num_buffers++;
SRC_BROADCAST (self); // <--- wake up aggregate thread
break;
}
// No space --- upstream thread BLOCKS here
GST_DEBUG_OBJECT (aggpad,
"Waiting for buffer to be consumed (chain) before enqueueing");
GST_OBJECT_UNLOCK (self);
SRC_UNLOCK (self);
PAD_WAIT_EVENT (aggpad); // <--- BLOCKS until aggregate() consumes buffer
PAD_UNLOCK (aggpad);
}
}
Summary: the complete waiting chain
Upstream Branch 2 thread Aggregate thread
| |
chain_internal() gst_aggregator_loop()
| |
enqueue buffer ──SRC_BROADCAST──> wait_and_check()
| |
| check_pads_ready()
| |
| ┌─ pad sink_0 has buffer? YES
| ├─ pad sink_1 has buffer? YES
| └─ pad sink_2 has buffer? NO ──> return FALSE
| |
| SRC_WAIT() ◄── BLOCKS here until sink_2 pushes
| |
| (Branch 2 finally pushes) |
| SRC_BROADCAST ──────────>|
| |
| check_pads_ready() → TRUE
| |
| nvdsmetamux_aggregate() ← finally runs
With 3 sink pads, the aggregate thread cannot run until all three branches deliver a buffer. The slowest branch dictates the frame rate. 这句话是关键,aggregate要等都有数据时,才会继续跑下去。(This is the key point: aggregate() only proceeds once every non-EOS pad has data queued.)