@@ -387,6 +387,10 @@ static struct ttm_tt *xe_ttm_tt_create(struct ttm_buffer_object *ttm_bo,
caching = ttm_uncached;
}
+ /* Clear TTM_TT_FLAG_ZERO_ALLOC when the GPU is set to clear pages */
+ if (xe->mem.gpu_page_clear)
+ page_flags &= ~TTM_TT_FLAG_ZERO_ALLOC;
+
err = ttm_tt_init(&tt->ttm, &bo->ttm, page_flags, caching, extra_pages);
if (err) {
kfree(tt);
@@ -636,6 +636,13 @@ int xe_device_probe(struct xe_device *xe)
if (err)
goto err_irq_shutdown;
+ /*
+ * On igfx devices with flat CCS we already clear CCS metadata with the
+ * GPU; extend that and use the GPU to clear pages as well.
+ */
+ if (xe_device_has_flat_ccs(xe) && !IS_DGFX(xe))
+ xe->mem.gpu_page_clear = true;
+
err = xe_vram_probe(xe);
if (err)
goto err_irq_shutdown;
@@ -325,6 +325,8 @@ struct xe_device {
struct xe_mem_region vram;
/** @mem.sys_mgr: system TTM manager */
struct ttm_resource_manager sys_mgr;
+ /** @mem.gpu_page_clear: true if page clearing is offloaded to the GPU */
+ bool gpu_page_clear;
} mem;
/** @sriov: device level virtualization data */
@@ -1003,6 +1003,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
struct xe_gt *gt = m->tile->primary_gt;
struct xe_device *xe = gt_to_xe(gt);
bool clear_system_ccs = (xe_bo_needs_ccs_pages(bo) && !IS_DGFX(xe)) ? true : false;
+ bool clear_on_create = xe->mem.gpu_page_clear || clear_vram;
struct dma_fence *fence = NULL;
u64 size = bo->size;
struct xe_res_cursor src_it;
@@ -1032,7 +1033,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
batch_size = 2 +
pte_update_size(m, clear_vram, src, &src_it,
&clear_L0, &clear_L0_ofs, &clear_L0_pt,
- clear_system_ccs ? 0 : emit_clear_cmd_len(gt), 0,
+ clear_on_create ? emit_clear_cmd_len(gt) : 0, 0,
avail_pts);
if (xe_device_has_flat_ccs(xe))
@@ -1060,7 +1061,7 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
update_idx = bb->len;
- if (!clear_system_ccs)
+ if (clear_on_create)
emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);
if (xe_device_has_flat_ccs(xe)) {
On LNL, because of flat CCS, the driver creates a migrate job to clear
CCS metadata. Extend that to also clear system pages using the GPU.
Inform TTM to allocate pages without __GFP_ZERO, by clearing the
TTM_TT_FLAG_ZERO_ALLOC flag, to avoid double page clearing.

v2: Handle regression on dgfx (Himal)
    Update commit message as no TTM API changes are needed.

Cc: Himal Prasad Ghimiray <himal.prasad.ghimiray@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Cc: "Thomas Hellström" <thomas.hellstrom@linux.intel.com>
Signed-off-by: Nirmoy Das <nirmoy.das@intel.com>
---
 drivers/gpu/drm/xe/xe_bo.c           | 4 ++++
 drivers/gpu/drm/xe/xe_device.c       | 7 +++++++
 drivers/gpu/drm/xe/xe_device_types.h | 2 ++
 drivers/gpu/drm/xe/xe_migrate.c      | 5 +++--
 4 files changed, 16 insertions(+), 2 deletions(-)
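
For reviewers who want to poke at the decision flow outside the driver, below
is a minimal userspace C sketch of the three decisions the patch introduces
(probe-time enable, allocation-flag adjustment, migrate-time clear). All
demo_* and DEMO_* names are made-up stand-ins for xe_device_probe(),
xe_ttm_tt_create(), xe_migrate_clear() and TTM_TT_FLAG_ZERO_ALLOC; it is an
illustration under those assumptions, not the actual xe/TTM code.

	/*
	 * Standalone sketch (not kernel code) of the double-clear avoidance in
	 * this patch: when the GPU will clear freshly allocated pages, the CPU
	 * side drops its "zero on allocation" flag so pages are not cleared
	 * twice. Names are hypothetical stand-ins for the xe/TTM identifiers.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	#define DEMO_FLAG_ZERO_ALLOC (1u << 0)	/* stand-in for TTM_TT_FLAG_ZERO_ALLOC */

	struct demo_device {
		bool has_flat_ccs;	/* device exposes flat CCS metadata */
		bool is_dgfx;		/* discrete GPU with device-local VRAM */
		bool gpu_page_clear;	/* mirrors xe->mem.gpu_page_clear */
	};

	/* Probe-time decision: only igfx with flat CCS offloads page clearing. */
	static void demo_probe(struct demo_device *dev)
	{
		dev->gpu_page_clear = dev->has_flat_ccs && !dev->is_dgfx;
	}

	/* Allocation-time decision: skip the CPU-side zeroing if the GPU clears. */
	static unsigned int demo_alloc_flags(const struct demo_device *dev,
					     unsigned int page_flags)
	{
		if (dev->gpu_page_clear)
			page_flags &= ~DEMO_FLAG_ZERO_ALLOC;
		return page_flags;
	}

	/* Migrate-time decision: emit a GPU clear for VRAM or when offloading. */
	static bool demo_clear_on_create(const struct demo_device *dev,
					 bool clear_vram)
	{
		return dev->gpu_page_clear || clear_vram;
	}

	int main(void)
	{
		struct demo_device lnl = { .has_flat_ccs = true, .is_dgfx = false };

		demo_probe(&lnl);
		printf("gpu_page_clear=%d alloc_flags=%#x clear_on_create=%d\n",
		       lnl.gpu_page_clear,
		       demo_alloc_flags(&lnl, DEMO_FLAG_ZERO_ALLOC),
		       demo_clear_on_create(&lnl, false));
		return 0;
	}

With the sketch's igfx + flat CCS device, gpu_page_clear ends up true, the
zero-alloc bit is stripped from the allocation flags, and the system clear is
emitted on the GPU, matching the intent of the hunks above.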