diff mbox series

[RFC,19/28] drm/xe: Update PT layer to understand ranges in VRAM

Message ID 20240828024901.2582335-20-matthew.brost@intel.com (mailing list archive)
State New, archived
Headers show
Series Introduce GPU SVM and Xe SVM implementation | expand

Commit Message

Matthew Brost Aug. 28, 2024, 2:48 a.m. UTC
Kinda cheating here by using the BO directly rather than VRAM pages.
These are the same at the moment since mixed mappings are not supported.
If this changes, then the array of pages / dma addresses will need a
cursor.

Signed-off-by: Matthew Brost <matthew.brost@intel.com>
---
 drivers/gpu/drm/xe/xe_pt.c  | 22 ++++++++++++++++------
 drivers/gpu/drm/xe/xe_svm.h | 10 ++++++++++
 2 files changed, 26 insertions(+), 6 deletions(-)
diff mbox series

Patch

diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index fc86adf9f0a6..e9195029ea60 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -607,9 +607,12 @@  xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
 		 struct xe_vm_pgtable_update *entries, u32 *num_entries)
 {
 	struct xe_device *xe = tile_to_xe(tile);
-	struct xe_bo *bo = xe_vma_bo(vma);
-	bool is_devmem = !xe_vma_is_userptr(vma) && bo &&
-		(xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo));
+	bool range_devmem = range && xe_svm_range_in_vram(range);
+	struct xe_bo *bo = range_devmem ? range->base.vram_allocation :
+		xe_vma_bo(vma);
+	bool is_devmem = range_devmem ||
+		(!xe_vma_is_userptr(vma) && bo &&
+		(xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo)));
 	struct xe_res_cursor curs;
 	struct xe_pt_stage_bind_walk xe_walk = {
 		.base = {
@@ -675,9 +678,16 @@  xe_pt_stage_bind(struct xe_tile *tile, struct xe_vma *vma,
 	xe_bo_assert_held(bo);
 
 	if (range) {
-		xe_res_first_dma(range->base.dma_addr, 0,
-				 range->base.va.end - range->base.va.start,
-				 range->base.order, &curs);
+		if (is_devmem)
+			xe_res_first(bo->ttm.resource, 0,
+				     range->base.va.end - range->base.va.start,
+				     &curs);
+		else if (xe_svm_range_has_dma_mapping(range))
+			xe_res_first_dma(range->base.dma_addr, 0,
+					 range->base.va.end - range->base.va.start,
+					 range->base.order, &curs);
+		else
+			return -EAGAIN;	/* Invalidation corner case */
 	} else if (!xe_vma_is_null(vma)) {
 		if (xe_vma_is_userptr(vma))
 			xe_res_first_sg(to_userptr_vma(vma)->userptr.sg, 0,
diff --git a/drivers/gpu/drm/xe/xe_svm.h b/drivers/gpu/drm/xe/xe_svm.h
index f15df5c813f1..8b72e91cc37d 100644
--- a/drivers/gpu/drm/xe/xe_svm.h
+++ b/drivers/gpu/drm/xe/xe_svm.h
@@ -35,6 +35,16 @@  static inline bool xe_svm_range_pages_valid(struct xe_svm_range *range)
 	return drm_gpusvm_range_pages_valid(range->base.gpusvm, &range->base);
 }
 
+static inline bool xe_svm_range_in_vram(struct xe_svm_range *range)
+{
+	return range->base.flags.has_vram_pages;
+}
+
+static inline bool xe_svm_range_has_dma_mapping(struct xe_svm_range *range)
+{
+	return range->base.flags.has_dma_mapping;
+}
+
 #define xe_svm_notifier_lock(vm__)	\
 	drm_gpusvm_notifier_lock(&(vm__)->svm.gpusvm)