@@ -885,6 +885,14 @@ typedef struct drm_i915_private {
uint32_t hw_context_size;
struct drm_flip_driver flip_driver;
+
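+ /* atomic flips queued until the GPU has finished rendering */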
+ struct {
+ struct list_head list;
+ spinlock_t lock;
+ struct work_struct work;
+ struct workqueue_struct *wq;
+ unsigned int next_flip_seq;
+ } flip;
} drm_i915_private_t;
/* Iterate over initialised rings */
@@ -343,6 +343,8 @@ static void notify_ring(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = dev->dev_private;
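+ /* check whether any queued atomic flips were waiting on this ring */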
+ intel_atomic_notify_ring(dev, ring);
+
if (ring->obj == NULL)
return;
@@ -42,6 +42,10 @@ struct intel_flip {
struct drm_pending_atomic_event *event;
uint32_t old_fb_id;
struct list_head pending_head;
+ struct intel_ring_buffer *ring;
+ u32 seqno;
+ bool busy;
+ unsigned int flip_seq;
};
struct intel_plane_state {
@@ -801,6 +805,7 @@ static int pin_fbs(struct drm_device *dev,
struct intel_atomic_state *s)
{
int i, ret;
+ bool nonblock = s->flags & DRM_MODE_ATOMIC_NONBLOCK;
for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct intel_crtc_state *st = &s->crtc[i];
@@ -816,7 +821,7 @@ static int pin_fbs(struct drm_device *dev,
obj = to_intel_framebuffer(crtc->fb)->obj;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
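+ /* for a nonblocking flip, pipeline the pin against the bo's ring instead of stalling */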
+ ret = intel_pin_and_fence_fb_obj(dev, obj, nonblock ? obj->ring : NULL);
mutex_unlock(&dev->struct_mutex);
if (ret)
@@ -839,7 +844,7 @@ static int pin_fbs(struct drm_device *dev,
obj = to_intel_framebuffer(plane->fb)->obj;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, nonblock ? obj->ring : NULL);
mutex_unlock(&dev->struct_mutex);
if (ret)
@@ -2047,6 +2052,8 @@ static const struct drm_flip_driver_funcs intel_flip_driver_funcs = {
.flush = intel_flip_driver_flush,
};
+static void intel_atomic_process_flips_work(struct work_struct *work);
+
static void intel_flip_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -2065,6 +2072,11 @@ static void intel_flip_init(struct drm_device *dev)
list_for_each_entry(intel_plane, &dev->mode_config.plane_list, base.head)
drm_flip_helper_init(&intel_plane->flip_helper,
&dev_priv->flip_driver, &intel_flip_funcs);
+
+ INIT_LIST_HEAD(&dev_priv->flip.list);
+ spin_lock_init(&dev_priv->flip.lock);
+ INIT_WORK(&dev_priv->flip.work, intel_atomic_process_flips_work);
+ dev_priv->flip.wq = create_singlethread_workqueue("intel_flip");
}
static void intel_flip_fini(struct drm_device *dev)
@@ -2082,6 +2094,145 @@ static void intel_flip_fini(struct drm_device *dev)
drm_flip_driver_fini(&dev_priv->flip_driver);
}
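+
+/*
+ * Queue a flip on dev_priv->flip.list. If the new framebuffer is still
+ * being rendered to, emit a request on its ring and hold a ring irq
+ * reference so intel_atomic_notify_ring() can mark the flip ready once
+ * its seqno has passed. Called with dev_priv->flip.lock held.
+ */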
+static void intel_atomic_postpone_flip(struct drm_device *dev,
+ struct intel_flip *intel_flip)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = intel_flip->ring;
+ int ret;
+
+ intel_flip->busy = ring != NULL;
+
+ list_move_tail(&intel_flip->base.list, &dev_priv->flip.list);
+
+ if (!ring)
+ return;
+
+ if (WARN_ON(!ring->irq_get(ring))) {
+ intel_flip->busy = false;
+ return;
+ }
+
+ ret = i915_add_request(ring, NULL, &intel_flip->seqno);
+ if (WARN_ON(ret)) {
+ ring->irq_put(ring);
+ intel_flip->busy = false;
+ return;
+ }
+}
+
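+/*
+ * Hand a group of ready flips to the flip driver. If the crtc has been
+ * disabled in the meantime the flips are simply completed; otherwise
+ * they are scheduled with interrupts off, using intel_pipe_vblank_evade()
+ * to avoid racing the upcoming vblank.
+ */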
+static void intel_atomic_schedule_flips(struct drm_i915_private *dev_priv,
+ struct intel_crtc *intel_crtc,
+ struct list_head *flips)
+{
+ if (!intel_crtc->active) {
+ drm_flip_driver_complete_flips(&dev_priv->flip_driver, flips);
+ return;
+ }
+
+ drm_flip_driver_prepare_flips(&dev_priv->flip_driver, flips);
+
+ local_irq_disable();
+
+ intel_pipe_vblank_evade(&intel_crtc->base);
+
+ drm_flip_driver_schedule_flips(&dev_priv->flip_driver, flips);
+
+ local_irq_enable();
+}
+
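+/*
+ * Return true once every queued flip with the given flip_seq has
+ * finished rendering. Caller must hold dev_priv->flip.lock.
+ */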
+static bool intel_atomic_flips_ready(struct drm_device *dev, unsigned int flip_seq)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_flip *intel_flip;
+
+ /* check if all flips with the same flip_seq are ready */
+ list_for_each_entry(intel_flip, &dev_priv->flip.list, base.list) {
+ if (intel_flip->flip_seq != flip_seq)
+ break;
+
+ if (intel_flip->busy)
+ return false;
+ }
+
+ return true;
+}
+
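+/*
+ * Work function: pull groups of flips that share a flip_seq off
+ * dev_priv->flip.list as they become ready and schedule each group on
+ * its crtc. Stops once the list is empty or the group at the head of
+ * the list is still waiting for rendering.
+ */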
+static void intel_atomic_process_flips_work(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv = container_of(work, struct drm_i915_private, flip.work);
+ struct drm_device *dev = dev_priv->dev;
+
+ for (;;) {
+ struct intel_flip *intel_flip, *next;
+ unsigned int flip_seq;
+ struct intel_crtc *intel_crtc;
+ LIST_HEAD(flips);
+ unsigned long flags;
+
+ if (list_empty(&dev_priv->flip.list))
+ return;
+
+ spin_lock_irqsave(&dev_priv->flip.lock, flags);
+
+ intel_flip = list_first_entry(&dev_priv->flip.list, struct intel_flip, base.list);
+ flip_seq = intel_flip->flip_seq;
+ intel_crtc = to_intel_crtc(intel_flip->crtc);
+
+ if (intel_atomic_flips_ready(dev, flip_seq)) {
+ list_for_each_entry_safe(intel_flip, next, &dev_priv->flip.list, base.list) {
+ if (intel_flip->flip_seq != flip_seq)
+ break;
+ list_move_tail(&intel_flip->base.list, &flips);
+ }
+ }
+
+ spin_unlock_irqrestore(&dev_priv->flip.lock, flags);
+
+ if (list_empty(&flips))
+ return;
+
+ mutex_lock(&dev->mode_config.mutex);
+ intel_atomic_schedule_flips(dev_priv, intel_crtc, &flips);
+ mutex_unlock(&dev->mode_config.mutex);
+ }
+}
+
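+/*
+ * Called from the ring interrupt handler. Clears the busy flag of any
+ * queued flip whose seqno has passed, drops the ring irq reference taken
+ * in intel_atomic_postpone_flip(), and kicks the flip work if the group
+ * at the head of the list has become ready.
+ */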
+void intel_atomic_notify_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_flip *intel_flip;
+ unsigned long flags;
+ u32 seqno;
+
+ if (list_empty(&dev_priv->flip.list))
+ return;
+
+ seqno = ring->get_seqno(ring, false);
+
+ spin_lock_irqsave(&dev_priv->flip.lock, flags);
+
+ list_for_each_entry(intel_flip, &dev_priv->flip.list, base.list) {
+ if (ring != intel_flip->ring)
+ continue;
+
+ if (intel_flip->busy && i915_seqno_passed(seqno, intel_flip->seqno)) {
+ intel_flip->busy = false;
+ ring->irq_put(ring);
+ }
+ }
+
+ if (!list_empty(&dev_priv->flip.list)) {
+ intel_flip = list_first_entry(&dev_priv->flip.list, struct intel_flip, base.list);
+
+ if (intel_atomic_flips_ready(dev, intel_flip->flip_seq))
+ queue_work(dev_priv->flip.wq, &dev_priv->flip.work);
+ }
+
+ spin_unlock_irqrestore(&dev_priv->flip.lock, flags);
+}
+
static void atomic_pipe_commit(struct drm_device *dev,
struct intel_atomic_state *state,
int pipe)
@@ -2090,7 +2241,9 @@ static void atomic_pipe_commit(struct drm_device *dev,
struct drm_i915_file_private *file_priv = state->file->driver_priv;
LIST_HEAD(flips);
int i;
- bool pipe_enabled = to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe))->active;
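+ /* flips submitted by this commit share a flip_seq so they are scheduled as one group */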
+ unsigned int flip_seq = dev_priv->flip.next_flip_seq++;
+ struct intel_flip *intel_flip, *next;
+ unsigned long flags;
for (i = 0; i < dev->mode_config.num_crtc; i++) {
struct intel_crtc_state *st = &state->crtc[i];
@@ -2109,6 +2262,8 @@ static void atomic_pipe_commit(struct drm_device *dev,
drm_flip_init(&intel_flip->base, &intel_crtc->flip_helper);
+ intel_flip->flip_seq = flip_seq;
+
if (st->event) {
intel_flip->event = st->event;
st->event = NULL;
@@ -2121,6 +2276,9 @@ static void atomic_pipe_commit(struct drm_device *dev,
intel_flip->crtc = crtc;
+ if (crtc->fb)
+ intel_flip->ring = to_intel_framebuffer(crtc->fb)->obj->ring;
+
/* update primary_disabled before calc_plane() */
intel_crtc->primary_disabled = st->primary_disabled;
@@ -2164,6 +2322,8 @@ static void atomic_pipe_commit(struct drm_device *dev,
drm_flip_init(&intel_flip->base, &intel_plane->flip_helper);
+ intel_flip->flip_seq = flip_seq;
+
if (st->event) {
intel_flip->event = st->event;
st->event = NULL;
@@ -2177,6 +2337,9 @@ static void atomic_pipe_commit(struct drm_device *dev,
intel_flip->crtc = intel_get_crtc_for_pipe(dev, pipe);
intel_flip->plane = plane;
+ if (plane->fb)
+ intel_flip->ring = to_intel_framebuffer(plane->fb)->obj->ring;
+
intel_plane->calc(plane, plane->fb, &st->coords);
if (st->old.fb)
@@ -2196,20 +2359,12 @@ static void atomic_pipe_commit(struct drm_device *dev,
if (list_empty(&flips))
return;
- if (!pipe_enabled) {
- drm_flip_driver_complete_flips(&dev_priv->flip_driver, &flips);
- return;
- }
-
- drm_flip_driver_prepare_flips(&dev_priv->flip_driver, &flips);
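+ /*
+ * Don't schedule the flips here; queue them and let the flip work
+ * schedule the whole group once rendering has finished.
+ */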
+ spin_lock_irqsave(&dev_priv->flip.lock, flags);
+ list_for_each_entry_safe(intel_flip, next, &flips, base.list)
+ intel_atomic_postpone_flip(dev, intel_flip);
+ spin_unlock_irqrestore(&dev_priv->flip.lock, flags);
- local_irq_disable();
-
- intel_pipe_vblank_evade(intel_get_crtc_for_pipe(dev, pipe));
-
- drm_flip_driver_schedule_flips(&dev_priv->flip_driver, &flips);
-
- local_irq_enable();
+ queue_work(dev_priv->flip.wq, &dev_priv->flip.work);
}
void intel_atomic_handle_vblank(struct drm_device *dev, int pipe)
@@ -637,6 +637,7 @@ extern void intel_atomic_fini(struct drm_device *dev);
extern void intel_atomic_free_events(struct drm_device *dev, struct drm_file *file);
extern void intel_atomic_handle_vblank(struct drm_device *dev, int pipe);
extern void intel_atomic_clear_flips(struct drm_crtc *crtc);
+extern void intel_atomic_notify_ring(struct drm_device *dev, struct intel_ring_buffer *ring);
extern void intel_enable_primary(struct drm_crtc *crtc);
extern void intel_disable_primary(struct drm_crtc *crtc);