[v3,3/5] nouveau: fix mapping 2MB sysmem pages

Message ID: 20200701225352.9649-4-rcampbell@nvidia.com
State: New
Series: mm/hmm/nouveau: add PMD system memory mapping

Commit Message

Ralph Campbell July 1, 2020, 10:53 p.m. UTC
The nvif_object_ioctl() method NVIF_VMM_V0_PFNMAP wasn't correctly
setting the hardware-specific GPU page table entries for 2MB
pages. Fix this by adding functions to set and clear PD0 GPU page
table entries.

Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
---
 drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c |  5 +-
 .../drm/nouveau/nvkm/subdev/mmu/vmmgp100.c    | 82 +++++++++++++++++++
 2 files changed, 84 insertions(+), 3 deletions(-)
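
For illustration, here is a minimal standalone sketch of the page-size
selection loop fixed in the vmm.c hunk below. The types are simplified
stand-ins, not the nouveau structures; only the two loop predicates are
taken verbatim from the patch.

/*
 * Sketch of the nvkm_vmm_pfn_map() page-size selection walk.
 * Stand-in types for illustration; ->pfn plays the role of
 * page->desc->func->pfn in the real code.
 */
#include <stdio.h>

struct page_entry {
	unsigned shift;     /* log2 of the page size, 0 terminates */
	void (*pfn)(void);  /* stand-in for desc->func->pfn        */
};

static void pfn_backend(void) {}

int main(void)
{
	/* Largest page size first, as in the nvkm page array; pretend
	 * the 2MB level has no ->pfn backend (the pre-patch state).
	 */
	const struct page_entry pages[] = {
		{ 21, NULL },        /* 2MB: cannot map PFNs */
		{ 12, pfn_backend }, /* 4KB: can map PFNs    */
		{ 0,  NULL },
	};
	const unsigned shift = 21;   /* caller asks for 2MB */
	const struct page_entry *page;

	/* Old predicate (&&): the walk stops when the shift matches OR
	 * a ->pfn backend exists, so it halts on the 2MB entry despite
	 * the missing backend.
	 */
	page = pages;
	while (page->shift && page->shift != shift && page->pfn == NULL)
		page++;
	printf("old: shift=%u has_pfn=%s\n", page->shift,
	       page->pfn ? "yes" : "no");

	/* New predicate (||): the walk stops only when the shift
	 * matches AND a ->pfn backend exists; with no usable entry it
	 * runs to the terminator and the caller fails cleanly.
	 */
	page = pages;
	while (page->shift && (page->shift != shift ||
	       page->pfn == NULL))
		page++;
	printf("new: shift=%u has_pfn=%s\n", page->shift,
	       page->pfn ? "yes" : "no");
	return 0;
}

With the new gp100_vmm_pd0_pfn*() functions wired up, the 2MB entry
gains a real ->pfn backend and the corrected loop selects it.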

Comments

Ben Skeggs July 8, 2020, 3:19 a.m. UTC | #1
On Thu, 2 Jul 2020 at 08:54, Ralph Campbell <rcampbell@nvidia.com> wrote:
>
> The nvif_object_ioctl() method NVIF_VMM_V0_PFNMAP wasn't correctly
> setting the hardware-specific GPU page table entries for 2MB
> pages. Fix this by adding functions to set and clear PD0 GPU page
> table entries.
I can take this one in my tree now; it's fairly independent of the rest.

Ben.

Patch

diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 199f94e15c5f..19a6804e3989 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -1204,7 +1204,6 @@ nvkm_vmm_pfn_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size)
 /*TODO:
  * - Avoid PT readback (for dma_unmap etc), this might end up being dealt
  *   with inside HMM, which would be a lot nicer for us to deal with.
- * - Multiple page sizes (particularly for huge page support).
  * - Support for systems without a 4KiB page size.
  */
 int
@@ -1220,8 +1219,8 @@ nvkm_vmm_pfn_map(struct nvkm_vmm *vmm, u8 shift, u64 addr, u64 size, u64 *pfn)
 	/* Only support mapping where the page size of the incoming page
 	 * array matches a page size available for direct mapping.
 	 */
-	while (page->shift && page->shift != shift &&
-	       page->desc->func->pfn == NULL)
+	while (page->shift && (page->shift != shift ||
+	       page->desc->func->pfn == NULL))
 		page++;
 
 	if (!page->shift || !IS_ALIGNED(addr, 1ULL << shift) ||
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
index d86287565542..ed37fddd063f 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmmgp100.c
@@ -258,12 +258,94 @@ gp100_vmm_pd0_unmap(struct nvkm_vmm *vmm,
 	VMM_FO128(pt, vmm, pdei * 0x10, 0ULL, 0ULL, pdes);
 }
 
+static void
+gp100_vmm_pd0_pfn_unmap(struct nvkm_vmm *vmm,
+			struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	struct device *dev = vmm->mmu->subdev.device->dev;
+	dma_addr_t addr;
+
+	nvkm_kmap(pt->memory);
+	while (ptes--) {
+		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
+		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
+		u64 data   = (u64)datahi << 32 | datalo;
+
+		if ((data & (3ULL << 1)) != 0) {
+			addr = (data >> 8) << 12;
+			dma_unmap_page(dev, addr, 1UL << 21, DMA_BIDIRECTIONAL);
+		}
+		ptei++;
+	}
+	nvkm_done(pt->memory);
+}
+
+static bool
+gp100_vmm_pd0_pfn_clear(struct nvkm_vmm *vmm,
+			struct nvkm_mmu_pt *pt, u32 ptei, u32 ptes)
+{
+	bool dma = false;
+
+	nvkm_kmap(pt->memory);
+	while (ptes--) {
+		u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 0);
+		u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 16 + 4);
+		u64 data   = (u64)datahi << 32 | datalo;
+
+		if ((data & BIT_ULL(0)) && (data & (3ULL << 1)) != 0) {
+			VMM_WO064(pt, vmm, ptei * 16, data & ~BIT_ULL(0));
+			dma = true;
+		}
+		ptei++;
+	}
+	nvkm_done(pt->memory);
+	return dma;
+}
+
+static void
+gp100_vmm_pd0_pfn(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
+		  u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
+{
+	struct device *dev = vmm->mmu->subdev.device->dev;
+	dma_addr_t addr;
+
+	nvkm_kmap(pt->memory);
+	while (ptes--) {
+		u64 data = 0;
+
+		if (!(*map->pfn & NVKM_VMM_PFN_W))
+			data |= BIT_ULL(6); /* RO. */
+
+		if (!(*map->pfn & NVKM_VMM_PFN_VRAM)) {
+			addr = *map->pfn >> NVKM_VMM_PFN_ADDR_SHIFT;
+			addr = dma_map_page(dev, pfn_to_page(addr), 0,
+					    1UL << 21, DMA_BIDIRECTIONAL);
+			if (!WARN_ON(dma_mapping_error(dev, addr))) {
+				data |= addr >> 4;
+				data |= 2ULL << 1; /* SYSTEM_COHERENT_MEMORY. */
+				data |= BIT_ULL(3); /* VOL. */
+				data |= BIT_ULL(0); /* VALID. */
+			}
+		} else {
+			data |= (*map->pfn & NVKM_VMM_PFN_ADDR) >> 4;
+			data |= BIT_ULL(0); /* VALID. */
+		}
+
+		VMM_WO064(pt, vmm, ptei++ * 16, data);
+		map->pfn++;
+	}
+	nvkm_done(pt->memory);
+}
+
 static const struct nvkm_vmm_desc_func
 gp100_vmm_desc_pd0 = {
 	.unmap = gp100_vmm_pd0_unmap,
 	.sparse = gp100_vmm_pd0_sparse,
 	.pde = gp100_vmm_pd0_pde,
 	.mem = gp100_vmm_pd0_mem,
+	.pfn = gp100_vmm_pd0_pfn,
+	.pfn_clear = gp100_vmm_pd0_pfn_clear,
+	.pfn_unmap = gp100_vmm_pd0_pfn_unmap,
 };
 
 static void
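
For reference, a minimal user-space round trip of the PTE arithmetic the
new PD0 functions use: `data |= addr >> 4` on a 2MB-aligned DMA address
stores the 4KiB-granular page frame at bit 8, and `(data >> 8) << 12`
recovers it in the unmap path. The bit names follow the comments in the
patch; this sketches only the fields the patch touches, not the full
GP100 PTE format.

/*
 * Round trip of the PD0 PTE encoding above, runnable in user space.
 * Field names follow the in-line comments in the patch; this is an
 * illustration of the shifts, not an authoritative PTE description.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PTE_VALID       (1ULL << 0)
#define PTE_APER_SYSCOH (2ULL << 1)  /* SYSTEM_COHERENT_MEMORY */
#define PTE_VOL         (1ULL << 3)
#define PTE_RO          (1ULL << 6)

static uint64_t pte_encode(uint64_t dma_addr, int writable)
{
	uint64_t data = PTE_VALID | PTE_APER_SYSCOH | PTE_VOL;

	/* 2MB alignment keeps the low 17 bits of addr >> 4 clear, so
	 * the address field cannot collide with the flag bits.
	 */
	assert((dma_addr & ((1ULL << 21) - 1)) == 0);
	data |= dma_addr >> 4;
	if (!writable)
		data |= PTE_RO;
	return data;
}

static uint64_t pte_decode_addr(uint64_t data)
{
	/* Matches gp100_vmm_pd0_pfn_unmap(): the field at bit 8 holds
	 * the address in 4KiB units.
	 */
	return (data >> 8) << 12;
}

int main(void)
{
	uint64_t addr = 0x123400000ULL; /* an arbitrary 2MB-aligned address */
	uint64_t pte  = pte_encode(addr, 1);

	/* A nonzero aperture field (bits 2:1) marks system memory; the
	 * unmap path uses the same test before calling dma_unmap_page().
	 */
	assert((pte & (3ULL << 1)) != 0);
	assert(pte_decode_addr(pte) == addr);
	printf("pte=%#llx -> addr=%#llx\n",
	       (unsigned long long)pte, (unsigned long long)addr);
	return 0;
}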