@@ -10,6 +10,7 @@ struct nvkm_vgpu_mem {
struct nvidia_vgpu_mem base;
struct nvkm_memory *mem;
struct nvkm_vgpu_mgr *vgpu_mgr;
+ struct nvkm_vma *bar1_vma; /* BAR1 VMA backing base.bar1_vaddr; NULL while unmapped */
};
struct nvkm_vgpu_mgr {
@@ -6,6 +6,7 @@
#include <subdev/bar.h>
#include <subdev/fb.h>
#include <subdev/gsp.h>
+#include <subdev/mmu.h>
#include <vgpu_mgr/vgpu_mgr.h>
#include <drm/nvkm_vgpu_mgr_vfio.h>
@@ -203,6 +204,66 @@ static struct nvidia_vgpu_mem *alloc_fbmem(void *handle, u64 size,
return base;
}

+/* Undo bar1_map_mem(): drop the CPU mapping, then release the BAR1 VMA. */
+static void bar1_unmap_mem(struct nvidia_vgpu_mem *base)
+{
+	struct nvkm_vgpu_mem *mem =
+		container_of(base, struct nvkm_vgpu_mem, base);
+	struct nvkm_vgpu_mgr *vgpu_mgr = mem->vgpu_mgr;
+	struct nvkm_device *device = vgpu_mgr->nvkm_dev;
+	struct nvkm_vmm *vmm = nvkm_bar_bar1_vmm(device);
+
+	iounmap(base->bar1_vaddr);
+	base->bar1_vaddr = NULL;
+	/* nvkm_vmm_put() also unmaps a still-mapped VMA before freeing it. */
+	nvkm_vmm_put(vmm, &mem->bar1_vma);
+	mem->bar1_vma = NULL;
+}
+
+/*
+ * Map an FBMEM region into BAR1 and then into CPU virtual address space.
+ *
+ * Returns 0 on success, -EEXIST if the region is already mapped, or a
+ * negative error code on failure; no partial mapping is left behind.
+ */
+static int bar1_map_mem(struct nvidia_vgpu_mem *base)
+{
+	struct nvkm_vgpu_mem *mem =
+		container_of(base, struct nvkm_vgpu_mem, base);
+	struct nvkm_vgpu_mgr *vgpu_mgr = mem->vgpu_mgr;
+	struct nvkm_device *device = vgpu_mgr->nvkm_dev;
+	struct nvkm_vmm *vmm = nvkm_bar_bar1_vmm(device);
+	resource_size_t paddr;
+	int ret;
+
+	if (WARN_ON(base->bar1_vaddr || mem->bar1_vma))
+		return -EEXIST;
+
+	/* Carve a page-aligned range out of the BAR1 address space. */
+	ret = nvkm_vmm_get(vmm, 12, base->size, &mem->bar1_vma);
+	if (ret)
+		return ret;
+
+	ret = nvkm_memory_map(mem->mem, 0, vmm, mem->bar1_vma, NULL, 0);
+	if (ret) {
+		nvkm_vmm_put(vmm, &mem->bar1_vma);
+		return ret;
+	}
+
+	/* CPU physical address of the region within the BAR1 aperture. */
+	paddr = device->func->resource_addr(device, 1) +
+		mem->bar1_vma->addr;
+
+	base->bar1_vaddr = ioremap(paddr, base->size);
+	if (!base->bar1_vaddr) {
+		/* nvkm_vmm_put() unmaps the VMA before freeing it. */
+		nvkm_vmm_put(vmm, &mem->bar1_vma);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
struct nvkm_vgpu_mgr_vfio_ops nvkm_vgpu_mgr_vfio_ops = {
.vgpu_mgr_is_enabled = vgpu_mgr_is_enabled,
.get_handle = get_handle,
@@ -219,6 +264,8 @@ struct nvkm_vgpu_mgr_vfio_ops nvkm_vgpu_mgr_vfio_ops = {
.free_chids = free_chids,
.alloc_fbmem = alloc_fbmem,
.free_fbmem = free_fbmem,
+ .bar1_map_mem = bar1_map_mem,
+ .bar1_unmap_mem = bar1_unmap_mem,
};
/**
@@ -19,6 +19,7 @@ struct nvidia_vgpu_gsp_client {
struct nvidia_vgpu_mem {
u64 addr;
u64 size;
+ void __iomem *bar1_vaddr; /* CPU mapping of the region via BAR1; NULL while unmapped */
};
struct nvkm_vgpu_mgr_vfio_ops {
@@ -45,6 +46,8 @@ struct nvkm_vgpu_mgr_vfio_ops {
struct nvidia_vgpu_mem *(*alloc_fbmem)(void *handle, u64 size,
bool vmmu_aligned);
void (*free_fbmem)(struct nvidia_vgpu_mem *mem);
+ int (*bar1_map_mem)(struct nvidia_vgpu_mem *mem); /* map mem into BAR1 and CPU VA; 0 or -errno */
+ void (*bar1_unmap_mem)(struct nvidia_vgpu_mem *mem); /* undo bar1_map_mem */
};
struct nvkm_vgpu_mgr_vfio_ops *nvkm_vgpu_mgr_get_vfio_ops(void *handle);
The mgmt heap is a block of shared FBMEM between the GSP firmware and the vGPU manager. It is used to support vGPU RPCs and vGPU logging. To access the data structures of vGPU RPCs and vGPU logging, the mgmt heap FBMEM needs to be mapped into BAR1, and that BAR1 region must in turn be mapped into a CPU virtual address. Expose the BAR1 map routines to the NVIDIA vGPU VFIO module so it can map the mgmt heap. Signed-off-by: Zhi Wang <zhiw@nvidia.com> --- .../nouveau/include/nvkm/vgpu_mgr/vgpu_mgr.h | 1 + drivers/gpu/drm/nouveau/nvkm/vgpu_mgr/vfio.c | 47 +++++++++++++++++++ include/drm/nvkm_vgpu_mgr_vfio.h | 3 ++ 3 files changed, 51 insertions(+)