Message ID | 1435660040-17089-1-git-send-email-tvrtko.ursulin@linux.intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
On Tue, Jun 30, 2015 at 11:27:20AM +0100, Tvrtko Ursulin wrote: > + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { > + list_for_each_entry(vma, &obj->vma_list, vma_link) If only there was already a list of vma bound in the GGTT... -Chris
On 06/30/2015 11:33 AM, Chris Wilson wrote: > On Tue, Jun 30, 2015 at 11:27:20AM +0100, Tvrtko Ursulin wrote: >> + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { >> + list_for_each_entry(vma, &obj->vma_list, vma_link) > > If only there was already a list of vma bound in the GGTT... Let me guess, you have patches for that? :) But for this maybe not yet, if I can hazard a guess you are referring to the big VMA rewrite? Which is still in progress? Regards, Tvrtko
On Tue, Jun 30, 2015 at 12:03:04PM +0100, Tvrtko Ursulin wrote: > > On 06/30/2015 11:33 AM, Chris Wilson wrote: > >On Tue, Jun 30, 2015 at 11:27:20AM +0100, Tvrtko Ursulin wrote: > >>+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { > >>+ list_for_each_entry(vma, &obj->vma_list, vma_link) > > > >If only there was already a list of vma bound in the GGTT... > > Let me guess, you have patches for that? :) But for this maybe not > yet, if I can hazard a guess you are referring to the big VMA > rewrite? Which is still in progress? In upstream you can simply walk the lists of i915->gtt.base.active/i915->gtt.base.inactive. -Chris
Tested-By: Intel Graphics QA PRTS (Patch Regression Test System; Contact: shuang.he@intel.com)
Task id: 6678
-------------------------------------Summary-------------------------------------
Platform Delta drm-intel-nightly Series Applied
ILK 302/302 302/302
SNB 312/316 312/316
IVB 343/343 343/343
BYT -2 287/287 285/287
HSW 380/380 380/380
-------------------------------------Detailed-------------------------------------
Platform Test drm-intel-nightly Series Applied
*BYT igt@gem_partial_pwrite_pread@reads-display PASS(1) FAIL(1)
*BYT igt@gem_tiled_partial_pwrite_pread@writes-after-reads PASS(1) FAIL(1)
Note: You need to pay more attention to lines starting with '*'
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 31d8768..3bb6c99 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -269,7 +269,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) list_add(&obj->obj_exec_link, &stolen); total_obj_size += obj->base.size; - total_gtt_size += i915_gem_obj_ggtt_size(obj); + total_gtt_size += i915_gem_obj_total_ggtt_size(obj); count++; } list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) { @@ -299,7 +299,7 @@ static int i915_gem_stolen_list_info(struct seq_file *m, void *data) #define count_objects(list, member) do { \ list_for_each_entry(obj, list, member) { \ - size += i915_gem_obj_ggtt_size(obj); \ + size += i915_gem_obj_total_ggtt_size(obj); \ ++count; \ if (obj->map_and_fenceable) { \ mappable_size += i915_gem_obj_ggtt_size(obj); \ @@ -405,7 +405,7 @@ static void print_batch_pool_stats(struct seq_file *m, #define count_vmas(list, member) do { \ list_for_each_entry(vma, list, member) { \ - size += i915_gem_obj_ggtt_size(vma->obj); \ + size += i915_gem_obj_total_ggtt_size(vma->obj); \ ++count; \ if (vma->obj->map_and_fenceable) { \ mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ @@ -535,7 +535,7 @@ static int i915_gem_gtt_info(struct seq_file *m, void *data) describe_obj(m, obj); seq_putc(m, '\n'); total_obj_size += obj->base.size; - total_gtt_size += i915_gem_obj_ggtt_size(obj); + total_gtt_size += i915_gem_obj_total_ggtt_size(obj); count++; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ea9caf2..690b541 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -3010,6 +3010,9 @@ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj) return i915_gem_obj_size(obj, i915_obj_to_ggtt(obj)); } +unsigned long +i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj); + static inline int __must_check i915_gem_obj_ggtt_pin(struct 
drm_i915_gem_object *obj, uint32_t alignment, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index a2a4a27..32f6323 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -150,13 +150,16 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_get_aperture *args = data; struct drm_i915_gem_object *obj; + struct i915_vma *vma; size_t pinned; pinned = 0; mutex_lock(&dev->struct_mutex); - list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) - if (i915_gem_obj_is_pinned(obj)) - pinned += i915_gem_obj_ggtt_size(obj); + list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { + list_for_each_entry(vma, &obj->vma_list, vma_link) + if (i915_is_ggtt(vma->vm) && vma->pin_count) + pinned += vma->node.size; + } mutex_unlock(&dev->struct_mutex); args->aper_size = dev_priv->gtt.base.total; @@ -5469,3 +5472,17 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) return false; } +unsigned long +i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj) +{ + unsigned long size = 0; + struct i915_vma *vma; + + list_for_each_entry(vma, &obj->vma_list, vma_link) { + if (i915_is_ggtt(vma->vm) && + drm_mm_node_allocated(&vma->node)) + size += vma->node.size; + } + + return size; +}