@@ -464,6 +464,15 @@ typedef struct drm_i915_private {
struct list_head flushing_list;
/**
+ * List of objects currently pending a GPU write.
+ *
+ * All elements on this list also belong to either the
+ * active_list or the flushing_list; last_rendering_seqno
+ * can be used to differentiate between the two lists.
+ */
+ struct list_head gpu_write_list;
+
+ /**
* LRU list of objects which are not in the ringbuffer and
* are ready to unbind, but are still in the GTT.
*
@@ -554,6 +563,8 @@ struct drm_i915_gem_object {
/** This object's place on the active/flushing/inactive lists */
struct list_head list;
+ /** This object's place on the GPU write list */
+ struct list_head gpu_write_list;
/** This object's place on the fenced object LRU */
struct list_head fence_list;
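The two hunks above add the bookkeeping structure: a per-device list head (mm.gpu_write_list) and a per-object node (gpu_write_list) that links every object with an outstanding GPU write onto it. As a minimal sketch of the invariant spelled out in the comment (illustrative only; the helper below is not part of the patch), a debug walk could assert that every element is still tracked as active, with last_rendering_seqno distinguishing the active_list case (non-zero) from the flushing_list case (zero):

	/* Hypothetical debug helper, not in this patch. */
	static void check_gpu_write_list(drm_i915_private_t *dev_priv)
	{
		struct drm_i915_gem_object *obj_priv;

		list_for_each_entry(obj_priv, &dev_priv->mm.gpu_write_list,
				    gpu_write_list) {
			/* Both the active_list and the flushing_list keep
			 * obj_priv->active set; only last_rendering_seqno
			 * tells the two apart. */
			BUG_ON(!obj_priv->active);
		}
	}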
@@ -1570,6 +1570,8 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
else
list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+ BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+
obj_priv->last_rendering_seqno = 0;
if (obj_priv->active) {
obj_priv->active = 0;
@@ -1633,21 +1635,21 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
INIT_LIST_HEAD(&request->client_list);
}
- /* Associate any objects on the flushing list matching the write
+ /* Associate any objects on the gpu_write_list matching the write
* domain we're flushing with our flush.
*/
- if (flush_domains != 0) {
+ if (flush_domains & I915_GEM_GPU_DOMAINS) {
struct drm_i915_gem_object *obj_priv, *next;
list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.flushing_list, list) {
+ &dev_priv->mm.gpu_write_list,
+ gpu_write_list) {
struct drm_gem_object *obj = obj_priv->obj;
- if ((obj->write_domain & flush_domains) ==
- obj->write_domain) {
+ if (obj->write_domain & flush_domains) {
uint32_t old_write_domain = obj->write_domain;
-
obj->write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
@@ -1655,7 +1657,6 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
old_write_domain);
}
}
-
}
if (!dev_priv->mm.suspended) {
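The rewritten loop walks mm.gpu_write_list instead of the whole flushing_list, and for each object whose write domain is being flushed it clears obj->write_domain, unlinks the gpu_write_list node, and moves the object to the active list under the new seqno. The old "(obj->write_domain & flush_domains) == obj->write_domain" test and the simplified intersection test are effectively equivalent, since a GEM object carries at most one write domain at a time. The resulting post-condition can be sketched as follows (illustrative check, not part of the patch; it reuses the local obj_priv and flush_domains names):

	/* After the walk, nothing left on the list still matches the
	 * domains that were just flushed. */
	list_for_each_entry(obj_priv, &dev_priv->mm.gpu_write_list,
			    gpu_write_list)
		BUG_ON(obj_priv->obj->write_domain & flush_domains);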
@@ -2725,7 +2726,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
seqno = i915_add_request(dev, NULL, obj->write_domain);
- obj->write_domain = 0;
+ BUG_ON(obj->write_domain);
i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
@@ -3745,16 +3746,23 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
i915_gem_flush(dev,
dev->invalidate_domains,
dev->flush_domains);
- if (dev->flush_domains)
+ if (dev->flush_domains & I915_GEM_GPU_DOMAINS)
(void)i915_add_request(dev, file_priv,
dev->flush_domains);
}
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
uint32_t old_write_domain = obj->write_domain;
obj->write_domain = obj->pending_write_domain;
+ if (obj->write_domain)
+ list_move_tail(&obj_priv->gpu_write_list,
+ &dev_priv->mm.gpu_write_list);
+ else
+ list_del_init(&obj_priv->gpu_write_list);
+
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
old_write_domain);
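After submission the rule is simple: an object sits on mm.gpu_write_list exactly when obj->write_domain is non-zero. Note that list_move_tail() is safe here even when the object was never on the list, because INIT_LIST_HEAD() (added in i915_gem_init_object below) leaves the node self-linked, so the internal unlink is a no-op. A minimal standalone sketch of that <linux/list.h> behaviour:

	struct list_head head, node;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&node);
	list_move_tail(&node, &head);	/* appends: node was detached */
	list_del_init(&node);		/* detaches, node self-linked again */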
@@ -4147,6 +4155,7 @@ int i915_gem_init_object(struct drm_gem_object *obj)
obj_priv->obj = obj;
obj_priv->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj_priv->list);
+ INIT_LIST_HEAD(&obj_priv->gpu_write_list);
INIT_LIST_HEAD(&obj_priv->fence_list);
obj_priv->madv = I915_MADV_WILLNEED;
@@ -4281,14 +4290,18 @@ i915_gem_idle(struct drm_device *dev)
* the GPU domains and just stuff them onto inactive.
*/
while (!list_empty(&dev_priv->mm.active_list)) {
+ struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
uint32_t old_write_domain;
- obj = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list)->obj;
+ obj_priv = list_first_entry(&dev_priv->mm.active_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
old_write_domain = obj->write_domain;
obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ if (!list_empty(&obj_priv->gpu_write_list))
+ list_del_init(&obj_priv->gpu_write_list);
i915_gem_object_move_to_inactive(obj);
trace_i915_gem_object_change_domain(obj,
@@ -4298,14 +4311,18 @@ i915_gem_idle(struct drm_device *dev)
spin_unlock(&dev_priv->mm.active_list_lock);
while (!list_empty(&dev_priv->mm.flushing_list)) {
+ struct drm_i915_gem_object *obj_priv;
struct drm_gem_object *obj;
uint32_t old_write_domain;
- obj = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list)->obj;
+ obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ list);
+ obj = obj_priv->obj;
old_write_domain = obj->write_domain;
obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
+ if (!list_empty(&obj_priv->gpu_write_list))
+ list_del_init(&obj_priv->gpu_write_list);
i915_gem_object_move_to_inactive(obj);
trace_i915_gem_object_change_domain(obj,
@@ -4598,6 +4615,7 @@ i915_gem_load(struct drm_device *dev)
spin_lock_init(&dev_priv->mm.active_list_lock);
INIT_LIST_HEAD(&dev_priv->mm.active_list);
INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+ INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);