@@ -588,19 +588,24 @@ static void __gen8_do_map_pt(gen8_ppgtt_pde_t * const pde,
/* It's likely we'll map more than one pagetable at a time. This function will
* save us unnecessary kmap calls, but do no more functionally than multiple
* calls to map_pt. */
-static void gen8_map_pagetable_range(struct i915_pagedir *pd,
+static void gen8_map_pagetable_range(struct i915_address_space *vm,
+ struct i915_pagedir *pd,
uint64_t start,
- uint64_t length,
- struct drm_device *dev)
+ uint64_t length)
{
gen8_ppgtt_pde_t * const pagedir = kmap_atomic(pd->page);
struct i915_pagetab *pt;
uint64_t temp, pde;
- gen8_for_each_pde(pt, pd, start, length, temp, pde)
- __gen8_do_map_pt(pagedir + pde, pt, dev);
+ gen8_for_each_pde(pt, pd, start, length, temp, pde) {
+ __gen8_do_map_pt(pagedir + pde, pt, vm->dev);
+ trace_i915_pagetable_map(vm, pde, pt,
+ gen8_pte_index(start),
+ gen8_pte_count(start, length),
+ GEN8_PTES_PER_PT);
+ }
- if (!HAS_LLC(dev))
+ if (!HAS_LLC(vm->dev))
drm_clflush_virt_range(pagedir, PAGE_SIZE);
kunmap_atomic(pagedir);
@@ -668,6 +673,11 @@ static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
pde, vm);
}
+ trace_i915_pagetable_unmap(vm, pde, pt,
+ gen8_pte_index(pd_start),
+ gen8_pte_count(pd_start, pd_len),
+ GEN8_PTES_PER_PT);
+
bitmap_clear(pt->used_ptes,
gen8_pte_index(pd_start),
gen8_pte_count(pd_start, pd_len));
@@ -680,6 +690,10 @@ static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
continue;
}
free_pt_single(pt, dev);
+ trace_i915_pagetable_destroy(vm,
+ pde,
+ pd_start & GENMASK_ULL(63, GEN8_PDE_SHIFT),
+ GEN8_PDE_SHIFT);
pd->page_tables[pde] = NULL;
}
}
@@ -696,6 +710,9 @@ static void gen8_teardown_va_range_3lvl(struct i915_address_space *vm,
}
free_pd_single(pd, dev);
pdp->pagedirs[pdpe] = NULL;
+ trace_i915_pagedirectory_destroy(vm, pdpe,
+ start & GENMASK_ULL(63, GEN8_PDPE_SHIFT),
+ GEN8_PDPE_SHIFT);
}
}
@@ -793,6 +810,7 @@ static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
pd->page_tables[pde] = pt;
set_bit(pde, new_pts);
+ trace_i915_pagetable_alloc(vm, pde, start, GEN8_PDE_SHIFT);
}
return 0;
@@ -854,6 +872,7 @@ static int gen8_ppgtt_alloc_pagedirs(struct i915_address_space *vm,
pdp->pagedirs[pdpe] = pd;
set_bit(pdpe, new_pds);
+ trace_i915_pagedirectory_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
}
return 0;
@@ -990,7 +1009,7 @@ static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
}
set_bit(pdpe, pdp->used_pdpes);
- gen8_map_pagetable_range(pd, start, length, dev);
+ gen8_map_pagetable_range(vm, pd, start, length);
pd->zombie = 0;
}
@@ -1095,7 +1114,7 @@ static int gen8_aliasing_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
}
gen8_for_each_pdpe(pd, pdp, start, size, temp, pdpe)
- gen8_map_pagetable_range(pd, start, size, dev);
+ gen8_map_pagetable_range(&ppgtt->base, pd, start, size);
ppgtt->base.allocate_va_range = NULL;
ppgtt->base.teardown_va_range = NULL;
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -225,6 +225,38 @@ DEFINE_EVENT(i915_pagetable, i915_pagetable_destroy,
TP_ARGS(vm, pde, start, pde_shift)
);
+DEFINE_EVENT_PRINT(i915_pagetable, i915_pagedirectory_alloc,
+ TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift),
+ TP_ARGS(vm, pdpe, start, pdpe_shift),
+
+ TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
+ __entry->vm, __entry->pde, __entry->start, __entry->end)
+);
+
+DEFINE_EVENT_PRINT(i915_pagetable, i915_pagedirectory_destroy,
+ TP_PROTO(struct i915_address_space *vm, u32 pdpe, u64 start, u64 pdpe_shift),
+ TP_ARGS(vm, pdpe, start, pdpe_shift),
+
+ TP_printk("vm=%p, pdpe=%d (0x%llx-0x%llx)",
+ __entry->vm, __entry->pde, __entry->start, __entry->end)
+);
+
+DEFINE_EVENT_PRINT(i915_pagetable, i915_pagedirpo_alloc,
+ TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift),
+ TP_ARGS(vm, pml4e, start, pml4e_shift),
+
+ TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
+ __entry->vm, __entry->pde, __entry->start, __entry->end)
+);
+
+DEFINE_EVENT_PRINT(i915_pagetable, i915_pagedirpo_destroy,
+ TP_PROTO(struct i915_address_space *vm, u32 pml4e, u64 start, u64 pml4e_shift),
+ TP_ARGS(vm, pml4e, start, pml4e_shift),
+
+ TP_printk("vm=%p, pml4e=%d (0x%llx-0x%llx)",
+ __entry->vm, __entry->pde, __entry->start, __entry->end)
+);
+
/* Avoid extra math because we only support two sizes. The format is defined by
* bitmap_scnprintf. Each 32 bits is 8 HEX digits followed by comma */
#define TRACE_PT_SIZE(bits) \
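
Note: the DEFINE_EVENT_PRINT() entries above reuse the existing i915_pagetable
event class, which this hunk does not show. Only as a rough sketch (field names
inferred from the __entry-> accesses in the TP_printk() strings; the real
definition earlier in i915_trace.h may differ), that class would look
something like:

/* Sketch of the assumed shared event class, not the in-tree definition. */
DECLARE_EVENT_CLASS(i915_pagetable,
	TP_PROTO(struct i915_address_space *vm, u32 pde, u64 start, u64 pde_shift),
	TP_ARGS(vm, pde, start, pde_shift),

	TP_STRUCT__entry(
		__field(struct i915_address_space *, vm)
		__field(u32, pde)
		__field(u64, start)
		__field(u64, end)
	),

	TP_fast_assign(
		__entry->vm = vm;
		__entry->pde = pde;
		__entry->start = start;
		/* Last address covered by this entry, derived from the shift. */
		__entry->end = ((start + (1ULL << pde_shift)) &
				~((1ULL << pde_shift) - 1)) - 1;
	),

	TP_printk("vm=%p, pde=%d (0x%llx-0x%llx)",
		  __entry->vm, __entry->pde, __entry->start, __entry->end)
);

Sharing one class across page tables, page directories and PDPs appears to be
what the "v2: Consolidate" note in the commit message refers to; each
DEFINE_EVENT_PRINT() only overrides the printed label.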
This works the same as GEN6. I was disappointed that I need to pass vm
around now, but it's not so much uglier than the drm_device, and having
the vm in trace events is hugely important.

QUESTION: Now that I've rebased this on the zombie change, we probably
want to call it teardown and track unmaps as opposed to destruction.

v2: Consolidate pagetable/pagedirectory events

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
---
 drivers/gpu/drm/i915/i915_gem_gtt.c | 35 +++++++++++++++++++++++++++--------
 drivers/gpu/drm/i915/i915_trace.h   | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 59 insertions(+), 8 deletions(-)
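
Note: gen8_pte_index() and gen8_pte_count(), which the new map/unmap trace
calls in the i915_gem_gtt.c hunks pass along, come from the surrounding series
rather than this patch. A hypothetical, self-contained sketch of what they
compute (assuming 4K pages, GEN8_PDE_SHIFT address bits covered per page
table, and GEN8_PTES_PER_PT entries per table):

/* Hypothetical helpers, not the in-tree definitions: the first PTE slot that
 * 'start' touches within its page table, and how many of that table's
 * GEN8_PTES_PER_PT entries the [start, start + length) range covers, clamped
 * at the page-table boundary. */
static inline uint32_t gen8_pte_index(uint64_t start)
{
	return (start >> PAGE_SHIFT) & (GEN8_PTES_PER_PT - 1);
}

static inline uint32_t gen8_pte_count(uint64_t start, uint64_t length)
{
	/* First address belonging to the next page table. */
	const uint64_t boundary = ALIGN(start + 1, 1ULL << GEN8_PDE_SHIFT);
	uint64_t end = min(start + length, boundary);

	return gen8_pte_index(end - 1) - gen8_pte_index(start) + 1;
}

The count stops at the table boundary, and GEN8_PTES_PER_PT is passed to the
event separately, presumably so the trace output can show the touched slice
relative to the full table.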