[42/44] drm/amdkfd: Allow invalid pages in migration.src

Message ID 20210322105900.14068-43-Felix.Kuehling@amd.com (mailing list archive)
State New, archived
Series Add HMM-based SVM memory manager to KFD v2

Commit Message

Felix Kuehling March 22, 2021, 10:58 a.m. UTC
Invalid pages in migrate->src can happen when system memory pages were
never allocated. Skip them during the migration and 0-initialize the BO,
so the skipped VRAM pages read back as zeroes.

Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 50 ++++++++++++++++++------
 1 file changed, 38 insertions(+), 12 deletions(-)
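
[Editor's note] The core of the change is that entries in migrate->src
without MIGRATE_PFN_VALID are no longer dma-mapped; the matching VRAM
pages are left untouched and rely on the BO being cleared at allocation
(the svm_range_vram_node_new() flag flipped to true below). A minimal
sketch of that check, independent of the driver code; walk_src_pfns()
is illustrative, only the migrate_vma fields and helpers come from
<linux/migrate.h>:

  #include <linux/migrate.h>

  /* Illustrative only: skip src entries that never had a backing page. */
  static void walk_src_pfns(struct migrate_vma *migrate)
  {
  	unsigned long i;

  	for (i = 0; i < migrate->npages; i++) {
  		struct page *spage;

  		if (!(migrate->src[i] & MIGRATE_PFN_VALID)) {
  			/*
  			 * Never-allocated system page: nothing to copy.
  			 * The destination stays zero-filled because the
  			 * VRAM BO was cleared at allocation time.
  			 */
  			continue;
  		}

  		spage = migrate_pfn_to_page(migrate->src[i]);
  		/* ... dma-map spage and add it to the copy batch ... */
  	}
  }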
Patch

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
index 6748c5db64f5..87561b907543 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c
@@ -310,7 +310,7 @@  svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 	src = scratch;
 	dst = (uint64_t *)(scratch + npages);
 
-	r = svm_range_vram_node_new(adev, prange, false);
+	r = svm_range_vram_node_new(adev, prange, true);
 	if (r) {
 		pr_debug("failed %d get 0x%llx pages from vram\n", r, npages);
 		goto out;
@@ -328,17 +328,6 @@  svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 	for (i = j = 0; i < npages; i++) {
 		struct page *spage;
 
-		spage = migrate_pfn_to_page(migrate->src[i]);
-		src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE, DMA_TO_DEVICE);
-		r = dma_mapping_error(dev, src[i]);
-		if (r) {
-			pr_debug("failed %d dma_map_page\n", r);
-			goto out_free_vram_pages;
-		}
-
-		pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
-			 src[i] >> PAGE_SHIFT, page_to_pfn(spage));
-
 		dst[i] = vram_addr + (j << PAGE_SHIFT);
 		migrate->dst[i] = svm_migrate_addr_to_pfn(adev, dst[i]);
 		svm_migrate_get_vram_page(prange, migrate->dst[i]);
@@ -346,6 +335,43 @@  svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange,
 		migrate->dst[i] = migrate_pfn(migrate->dst[i]);
 		migrate->dst[i] |= MIGRATE_PFN_LOCKED;
 
+		if (migrate->src[i] & MIGRATE_PFN_VALID) {
+			spage = migrate_pfn_to_page(migrate->src[i]);
+			src[i] = dma_map_page(dev, spage, 0, PAGE_SIZE,
+					      DMA_TO_DEVICE);
+			r = dma_mapping_error(dev, src[i]);
+			if (r) {
+				pr_debug("failed %d dma_map_page\n", r);
+				goto out_free_vram_pages;
+			}
+		} else {
+			if (j) {
+				j--;
+				r = svm_migrate_copy_memory_gart(
+						adev, src + i - j,
+						dst + i - j, j + 1,
+						FROM_RAM_TO_VRAM,
+						mfence);
+				if (r)
+					goto out_free_vram_pages;
+				offset = j;
+				vram_addr = (node->start + offset) << PAGE_SHIFT;
+				j = 0;
+			}
+			offset++;
+			vram_addr += PAGE_SIZE;
+			if (offset >= node->size) {
+				node++;
+				pr_debug("next node size 0x%llx\n", node->size);
+				vram_addr = node->start << PAGE_SHIFT;
+				offset = 0;
+			}
+			continue;
+		}
+
+		pr_debug("dma mapping src to 0x%llx, page_to_pfn 0x%lx\n",
+			 src[i] >> PAGE_SHIFT, page_to_pfn(spage));
+
 		if (j + offset >= node->size - 1 && i < npages - 1) {
 			r = svm_migrate_copy_memory_gart(adev, src + i - j,
 							 dst + i - j, j + 1,
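
[Editor's note] The hunk above batches contiguous dma-mapped pages and
flushes the pending GART copy whenever a gap (an invalid src page) or a
VRAM node boundary is reached. A simplified sketch of that flush-on-gap
pattern, without the driver's exact index and node arithmetic;
flush_batch() is a hypothetical stand-in for
svm_migrate_copy_memory_gart():

  #include <linux/types.h>
  #include <linux/migrate.h>

  /* Hypothetical stand-in for svm_migrate_copy_memory_gart(). */
  static int flush_batch(u64 *src, u64 *dst, unsigned long npages);

  static int copy_with_gaps(u64 *src, u64 *dst,
  			  unsigned long *src_pfns, unsigned long npages)
  {
  	unsigned long i, batch = 0;
  	int r;

  	for (i = 0; i < npages; i++) {
  		if (src_pfns[i] & MIGRATE_PFN_VALID) {
  			batch++;	/* page joins the pending batch */
  			continue;
  		}
  		if (batch) {	/* gap: flush pages [i - batch, i) */
  			r = flush_batch(src + i - batch,
  					dst + i - batch, batch);
  			if (r)
  				return r;
  			batch = 0;
  		}
  		/* the skipped dst page stays zero-filled */
  	}
  	/* flush any trailing batch */
  	return batch ? flush_batch(src + i - batch,
  				   dst + i - batch, batch) : 0;
  }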