@@ -1485,6 +1485,15 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
obj_priv->pages = NULL;
}
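+/* Return the seqno that the next request to be emitted will use. */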
+static uint32_t
+i915_gem_next_request_seqno(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ return dev_priv->mm.next_gem_seqno;
+}
+
static void
i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
{
@@ -1497,6 +1506,11 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno)
drm_gem_object_reference(obj);
obj_priv->active = 1;
}
+
+ /* Take the seqno of the next request if none is given */
+ if (seqno == 0)
+ seqno = i915_gem_next_request_seqno(dev);
+
/* Move from whatever list we were on to the tail of execution. */
spin_lock(&dev_priv->mm.active_list_lock);
list_move_tail(&obj_priv->list,
@@ -1830,6 +1844,13 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
BUG_ON(seqno == 0);
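+ /* This seqno was handed out before its request was emitted; emit it now. */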
+ if (seqno == dev_priv->mm.next_gem_seqno) {
+ seqno = i915_add_request(dev, NULL, 0);
+ if (seqno == 0)
+ return -ENOMEM;
+ }
+
if (atomic_read(&dev_priv->mm.wedged))
return -EIO;
@@ -2888,7 +2909,6 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
int
i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t old_write_domain, old_read_domains;
int ret;
@@ -2898,17 +2918,10 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj)
return -EINVAL;
i915_gem_object_flush_gpu_write_domain(obj);
-
/* Wait on any GPU rendering and flushing to occur. */
- if (obj_priv->active) {
-#if WATCH_BUF
- DRM_INFO("%s: object %p wait for seqno %08x\n",
- __func__, obj, obj_priv->last_rendering_seqno);
-#endif
- ret = i915_do_wait_request(dev, obj_priv->last_rendering_seqno, 0);
- if (ret != 0)
- return ret;
- }
+ ret = i915_gem_object_wait_rendering(obj);
+ if (ret != 0)
+ return ret;
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;