@@ -929,6 +929,7 @@ typedef struct drm_i915_private {
struct work_struct work;
struct workqueue_struct *wq;
unsigned int next_flip_seq;
+ unsigned int queue_len;
} flip;
} drm_i915_private_t;
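The new queue_len field shadows the length of dev_priv->flip.list: the hunks below only ever touch it with dev_priv->flip.lock held. As a minimal sketch of that locking rule, a reader outside the flip paths would look something like this (hypothetical helper, not part of the patch):

	/* Hypothetical helper, not in this patch: sample queue_len under
	 * the same flip.lock that the hunks below hold while updating it.
	 */
	static unsigned int i915_flip_queue_len_sample(drm_i915_private_t *dev_priv)
	{
		unsigned long flags;
		unsigned int len;

		spin_lock_irqsave(&dev_priv->flip.lock, flags);
		len = dev_priv->flip.queue_len;
		spin_unlock_irqrestore(&dev_priv->flip.lock, flags);

		return len;
	}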
@@ -16,6 +16,21 @@
/* object tracking */
+TRACE_EVENT(i915_flip_queue_len,
+ TP_PROTO(unsigned int queue_len),
+ TP_ARGS(queue_len),
+
+ TP_STRUCT__entry(
+ __field(u32, queue_len)
+ ),
+
+ TP_fast_assign(
+ __entry->queue_len = queue_len;
+ ),
+
+ TP_printk("queue_len=%u", __entry->queue_len)
+);
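Assuming this header keeps the usual TRACE_SYSTEM i915 definition, the new event behaves like any other i915 tracepoint: enable it via /sys/kernel/debug/tracing/events/i915/i915_flip_queue_len/enable, and each hit emits one queue_len=<n> line in the trace buffer, per the TP_printk format above.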
+
TRACE_EVENT(i915_gem_object_pin_count,
TP_PROTO(struct drm_i915_gem_object *obj, u32 pin_count_pre, u32 pin_count_post),
TP_ARGS(obj, pin_count_pre, pin_count_post),
@@ -2195,9 +2195,12 @@ static void intel_atomic_process_flips_work(struct work_struct *work)
if (intel_flip->flip_seq != flip_seq)
break;
list_move_tail(&intel_flip->base.list, &flips);
+ dev_priv->flip.queue_len--;
}
}

+ trace_i915_flip_queue_len(dev_priv->flip.queue_len);
+
spin_unlock_irqrestore(&dev_priv->flip.lock, flags);

if (list_empty(&flips))
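Note the asymmetry here: queue_len is decremented once per flip moved off dev_priv->flip.list, but the tracepoint fires only once per work invocation, after the whole flip_seq batch has been collected and still under flip.lock, so each event reports a single coherent post-batch sample rather than one event per flip.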
@@ -2441,8 +2444,12 @@ static void atomic_pipe_commit(struct drm_device *dev,
spin_lock_irqsave(&dev_priv->flip.lock, flags);

- list_for_each_entry_safe(intel_flip, next, &flips, base.list)
+ list_for_each_entry_safe(intel_flip, next, &flips, base.list) {
list_move_tail(&intel_flip->base.list, &dev_priv->flip.list);
+ dev_priv->flip.queue_len++;
+ }
+
+ trace_i915_flip_queue_len(dev_priv->flip.queue_len);

/* if no rings are involved, we can avoid checking seqnos */
if (rings_mask == 0)
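The increment side mirrors the decrement in intel_atomic_process_flips_work: both run under flip.lock, so a traced value always matches the list length at that instant. If stricter debugging were wanted, the bare counter updates could be wrapped along these lines (hypothetical helper, not part of the patch):

	/* Hypothetical debug wrapper, not in this patch: assert that
	 * flip.lock is held and catch queue_len underflow.
	 */
	static void i915_flip_queue_len_dec(drm_i915_private_t *dev_priv)
	{
		assert_spin_locked(&dev_priv->flip.lock);
		WARN_ON(dev_priv->flip.queue_len == 0);
		dev_priv->flip.queue_len--;
	}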