[RFC,v2,06/12] drm/i915/svm: Device memory support

Message ID 20191213215614.24558-7-niranjana.vishwanathapura@intel.com (mailing list archive)
State New, archived
Series drm/i915/svm: Add SVM support

Commit Message

Niranjana Vishwanathapura Dec. 13, 2019, 9:56 p.m. UTC
Plug in device memory through HMM as DEVICE_PRIVATE.
Add support functions to allocate and free pages from device memory.
Implement an ioctl to prefetch pages from host to device memory.
For now, only migrating pages from host memory to device memory is supported.

Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Cc: Sudeep Dutt <sudeep.dutt@intel.com>
Signed-off-by: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
---
 drivers/gpu/drm/i915/Kconfig               |   9 +
 drivers/gpu/drm/i915/Makefile              |   3 +-
 drivers/gpu/drm/i915/gem/i915_gem_object.c |  13 -
 drivers/gpu/drm/i915/i915_buddy.h          |  12 +
 drivers/gpu/drm/i915/i915_drv.c            |   1 +
 drivers/gpu/drm/i915/i915_svm.c            |   6 +
 drivers/gpu/drm/i915/i915_svm.h            |  15 +
 drivers/gpu/drm/i915/i915_svm_devmem.c     | 400 +++++++++++++++++++++
 drivers/gpu/drm/i915/intel_memory_region.h |  14 +
 drivers/gpu/drm/i915/intel_region_lmem.c   |  10 +
 include/uapi/drm/i915_drm.h                |  22 ++
 11 files changed, 491 insertions(+), 14 deletions(-)
 create mode 100644 drivers/gpu/drm/i915/i915_svm_devmem.c

Comments

Jason Gunthorpe Dec. 17, 2019, 8:35 p.m. UTC | #1
On Fri, Dec 13, 2019 at 01:56:08PM -0800, Niranjana Vishwanathapura wrote:
> @@ -169,6 +170,11 @@ static int i915_range_fault(struct svm_notifier *sn,
>  			return ret;
>  		}
>  
> +		/* For dgfx, ensure the range is in device local memory only */
> +		regions = i915_dmem_convert_pfn(vm->i915, &range);
> +		if (!regions || (IS_DGFX(vm->i915) && (regions & REGION_SMEM)))
> +			return -EINVAL;
> +

This is not OK, as I said before, the driver cannot de-reference pfns
before doing the retry check, under lock.
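
A minimal sketch of the locked retry pattern being requested (names taken
from this series; the exact flow is an assumption, not the posted code):

	range.notifier_seq = mmu_interval_read_begin(&sn->notifier);
	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(&range, 0);
	up_read(&mm->mmap_sem);
	if (ret <= 0)
		return ret ? ret : -EBUSY;

	mutex_lock(&svm->mutex);
	if (mmu_interval_read_retry(&sn->notifier, range.notifier_seq)) {
		/* invalidation raced us: pfns are stale, don't touch them */
		mutex_unlock(&svm->mutex);
		goto again;	/* restart from mmu_interval_read_begin() */
	}
	/* only here is it safe to dereference range.pfns[] */
	regions = i915_dmem_convert_pfn(vm->i915, &range);
	sg_page_sizes = i915_svm_build_sg(vm, &range, st);
	/* ... bind ... */
	mutex_unlock(&svm->mutex);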

> +
> +int i915_dmem_convert_pfn(struct drm_i915_private *dev_priv,
> +			  struct hmm_range *range)
> +{
> +	unsigned long i, npages;
> +	int regions = 0;
> +
> +	npages = (range->end - range->start) >> PAGE_SHIFT;
> +	for (i = 0; i < npages; ++i) {
> +		struct i915_buddy_block *block;
> +		struct intel_memory_region *mem;
> +		struct page *page;
> +		u64 addr;
> +
> +		page = hmm_device_entry_to_page(range, range->pfns[i]);
                        ^^^^^^^^^^^^^^^^^^^^^^

For instance, that cannot be done on a speculatively loaded page.

This also looks like it suffers from the same bug as

> +		if (!page)
> +			continue;
> +
> +		if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
> +			regions |= REGION_SMEM;
> +			continue;
> +		}
> +
> +		if (!i915_dmem_page(dev_priv, page)) {
> +			WARN(1, "Some unknown device memory !\n");

Why is that a WARN? The user could put other device memory in the
address space. You need to 'segfault' the GPU execution if this happens.
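
Something along these lines instead of the WARN, i.e. treat it as a user
error (a sketch; the error plumbing back to the fault handler is an
assumption):

	if (!i915_dmem_page(dev_priv, page)) {
		/*
		 * Private pages belonging to some other device are a
		 * legal user setup, not a driver bug: fail the
		 * fault/bind instead of WARNing.
		 */
		return -EFAULT;
	}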

> +			range->pfns[i] = 0;
> +			continue;
> +		}
> +
> +		regions |= REGION_LMEM;
> +		block = page->zone_device_data;
> +		mem = block->private;
> +		addr = mem->region.start +
> +		       i915_buddy_block_offset(block);
> +		addr += (page_to_pfn(page) - block->pfn_first) << PAGE_SHIFT;
> +
> +		range->pfns[i] &= ~range->flags[HMM_PFN_DEVICE_PRIVATE];
> +		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
> +		range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;

This makes more sense as a direct manipulation of the sgl, not sure
why this routine is split out from the sgl builder?
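
I.e. fold the address computation into the sg building loop directly,
something like this (a sketch against this series' helpers; the exact
shape of i915_svm_build_sg() is an assumption):

	/* inside i915_svm_build_sg()'s walk over range->pfns[] */
	page = hmm_device_entry_to_page(range, range->pfns[i]);
	if (page && is_device_private_page(page)) {
		struct i915_buddy_block *block = page->zone_device_data;
		struct intel_memory_region *mem = block->private;

		/* device-local address straight from the buddy block */
		addr = mem->region.start + i915_buddy_block_offset(block) +
		       ((u64)(page_to_pfn(page) - block->pfn_first)
			<< PAGE_SHIFT);
	} else {
		addr = (u64)page_to_pfn(page) << PAGE_SHIFT;
	}
	/* append addr, coalescing contiguous runs into one sg entry */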

Jason
Niranjana Vishwanathapura Dec. 18, 2019, 10:15 p.m. UTC | #2
On Tue, Dec 17, 2019 at 08:35:47PM +0000, Jason Gunthorpe wrote:
>On Fri, Dec 13, 2019 at 01:56:08PM -0800, Niranjana Vishwanathapura wrote:
>> @@ -169,6 +170,11 @@ static int i915_range_fault(struct svm_notifier *sn,
>>  			return ret;
>>  		}
>>
>> +		/* For dgfx, ensure the range is in device local memory only */
>> +		regions = i915_dmem_convert_pfn(vm->i915, &range);
>> +		if (!regions || (IS_DGFX(vm->i915) && (regions & REGION_SMEM)))
>> +			return -EINVAL;
>> +
>
>This is not OK, as I said before, the driver cannot de-reference pfns
>before doing the retry check, under lock.
>

Thanks.
Ok, will push it down and do it after validating the range.

>> +
>> +int i915_dmem_convert_pfn(struct drm_i915_private *dev_priv,
>> +			  struct hmm_range *range)
>> +{
>> +	unsigned long i, npages;
>> +	int regions = 0;
>> +
>> +	npages = (range->end - range->start) >> PAGE_SHIFT;
>> +	for (i = 0; i < npages; ++i) {
>> +		struct i915_buddy_block *block;
>> +		struct intel_memory_region *mem;
>> +		struct page *page;
>> +		u64 addr;
>> +
>> +		page = hmm_device_entry_to_page(range, range->pfns[i]);
>                        ^^^^^^^^^^^^^^^^^^^^^^
>
>For instance, that cannot be done on a speculatively loaded page.
>
>This also looks like it suffers from the same bug as
>

Ok.

>> +		if (!page)
>> +			continue;
>> +
>> +		if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
>> +			regions |= REGION_SMEM;
>> +			continue;
>> +		}
>> +
>> +		if (!i915_dmem_page(dev_priv, page)) {
>> +			WARN(1, "Some unknown device memory !\n");
>
>Why is that a WARN? The user could put other device memory in the
>address space. You need to 'segfault' the GPU execution if this happens.
>

OK, will return an error here if the user is trying to bind.
I agree, we need to segfault the GPU execution if this happens during
GPU fault handling.

>> +			range->pfns[i] = 0;
>> +			continue;
>> +		}
>> +
>> +		regions |= REGION_LMEM;
>> +		block = page->zone_device_data;
>> +		mem = block->private;
>> +		addr = mem->region.start +
>> +		       i915_buddy_block_offset(block);
>> +		addr += (page_to_pfn(page) - block->pfn_first) << PAGE_SHIFT;
>> +
>> +		range->pfns[i] &= ~range->flags[HMM_PFN_DEVICE_PRIVATE];
>> +		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
>> +		range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
>
>This makes more sense as a direct manipulation of the sgl, not sure
>why this routine is split out from the sgl builder?
>

Ok, yeah, let me merge it with the sgl building.

Thanks,
Niranjana

>Jason

Patch

diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index 689e57fe3973..66337f2ca2bf 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -141,9 +141,18 @@  config DRM_I915_SVM
 	bool "Enable Shared Virtual Memory support in i915"
 	depends on STAGING
 	depends on DRM_I915
+	depends on ARCH_ENABLE_MEMORY_HOTPLUG
+	depends on ARCH_ENABLE_MEMORY_HOTREMOVE
+	depends on MEMORY_HOTPLUG
+	depends on MEMORY_HOTREMOVE
+	depends on ARCH_HAS_PTE_DEVMAP
+	depends on SPARSEMEM_VMEMMAP
+	depends on ZONE_DEVICE
+	depends on DEVICE_PRIVATE
 	depends on MMU
 	select HMM_MIRROR
 	select MMU_NOTIFIER
+	select MIGRATE_VMA_HELPER
 	default n
 	help
 	  Choose this option if you want Shared Virtual Memory (SVM)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 7d4cd9eefd12..b574ec31ea2e 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -155,7 +155,8 @@  i915-y += \
 
 # SVM code
 i915-$(CONFIG_DRM_I915_SVM) += gem/i915_gem_svm.o \
-			       i915_svm.o
+			       i915_svm.o \
+			       i915_svm_devmem.o
 
 # general-purpose microcontroller (GuC) support
 obj-y += gt/uc/
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 003d81c171d2..f868a301fc04 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -504,19 +504,6 @@  int __init i915_global_objects_init(void)
 	return 0;
 }
 
-static enum intel_region_id
-__region_id(u32 region)
-{
-	enum intel_region_id id;
-
-	for (id = 0; id < INTEL_REGION_UNKNOWN; ++id) {
-		if (intel_region_map[id] == region)
-			return id;
-	}
-
-	return INTEL_REGION_UNKNOWN;
-}
-
 bool
 i915_gem_object_svm_mapped(struct drm_i915_gem_object *obj)
 {
diff --git a/drivers/gpu/drm/i915/i915_buddy.h b/drivers/gpu/drm/i915/i915_buddy.h
index ed41f3507cdc..afc493e6c130 100644
--- a/drivers/gpu/drm/i915/i915_buddy.h
+++ b/drivers/gpu/drm/i915/i915_buddy.h
@@ -9,6 +9,9 @@ 
 #include <linux/bitops.h>
 #include <linux/list.h>
 
+/* 512 bits (one per page) supports 2MB blocks */
+#define I915_BUDDY_MAX_PAGES   512
+
 struct i915_buddy_block {
 #define I915_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
 #define I915_BUDDY_HEADER_STATE  GENMASK_ULL(11, 10)
@@ -32,6 +35,15 @@  struct i915_buddy_block {
 	 */
 	struct list_head link;
 	struct list_head tmp_link;
+
+	unsigned long pfn_first;
+	/*
+	 * FIXME: There are alternatives to a bitmap, such as splitting the
+	 * block into contiguous 4K sized blocks. But that is part of bigger
+	 * issues involving partial invalidation of large mappings, freeing
+	 * the blocks, etc.; revisit.
+	 */
+	unsigned long bitmap[BITS_TO_LONGS(I915_BUDDY_MAX_PAGES)];
 };
 
 #define I915_BUDDY_MAX_ORDER  I915_BUDDY_HEADER_ORDER
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 866d3cbb1edf..f1b92fd3d234 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2765,6 +2765,7 @@  static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_CREATE, i915_gem_vm_create_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_DESTROY, i915_gem_vm_destroy_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_GEM_VM_BIND, i915_gem_vm_bind_ioctl, DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_GEM_VM_PREFETCH, i915_gem_vm_prefetch_ioctl, DRM_RENDER_ALLOW)
 };
 
 static struct drm_driver driver = {
diff --git a/drivers/gpu/drm/i915/i915_svm.c b/drivers/gpu/drm/i915/i915_svm.c
index 5941be5b5803..31a80ae0dd45 100644
--- a/drivers/gpu/drm/i915/i915_svm.c
+++ b/drivers/gpu/drm/i915/i915_svm.c
@@ -152,6 +152,7 @@  static int i915_range_fault(struct svm_notifier *sn,
 	struct mm_struct *mm = sn->notifier.mm;
 	struct i915_address_space *vm = svm->vm;
 	u32 sg_page_sizes;
+	int regions;
 	u64 flags;
 	long ret;
 
@@ -169,6 +170,11 @@  static int i915_range_fault(struct svm_notifier *sn,
 			return ret;
 		}
 
+		/* For dgfx, ensure the range is in device local memory only */
+		regions = i915_dmem_convert_pfn(vm->i915, &range);
+		if (!regions || (IS_DGFX(vm->i915) && (regions & REGION_SMEM)))
+			return -EINVAL;
+
 		sg_page_sizes = i915_svm_build_sg(vm, &range, st);
 
 		mutex_lock(&svm->mutex);
diff --git a/drivers/gpu/drm/i915/i915_svm.h b/drivers/gpu/drm/i915/i915_svm.h
index a91e6a637f10..ae39c2ef2f18 100644
--- a/drivers/gpu/drm/i915/i915_svm.h
+++ b/drivers/gpu/drm/i915/i915_svm.h
@@ -33,6 +33,14 @@  static inline bool i915_vm_is_svm_enabled(struct i915_address_space *vm)
 	return vm->svm;
 }
 
+int i915_dmem_convert_pfn(struct drm_i915_private *dev_priv,
+			  struct hmm_range *range);
+int i915_gem_vm_prefetch_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
+struct i915_devmem *i915_svm_devmem_add(struct drm_i915_private *i915,
+					u64 size);
+void i915_svm_devmem_remove(struct i915_devmem *devmem);
+
 #else
 
 struct i915_svm { };
@@ -45,6 +53,13 @@  static inline int i915_svm_bind_mm(struct i915_address_space *vm)
 static inline bool i915_vm_is_svm_enabled(struct i915_address_space *vm)
 { return false; }
 
+static inline int i915_gem_vm_prefetch_ioctl(struct drm_device *dev, void *data,
+					     struct drm_file *file_priv)
+{ return -ENOTSUPP; }
+static inline
+struct i915_devmem *i915_svm_devmem_add(struct drm_i915_private *i915, u64 size)
+{ return NULL; }
+static inline void i915_svm_devmem_remove(struct i915_devmem *devmem) { }
 #endif
 
 #endif /* __I915_SVM_H */
diff --git a/drivers/gpu/drm/i915/i915_svm_devmem.c b/drivers/gpu/drm/i915/i915_svm_devmem.c
new file mode 100644
index 000000000000..0a1f1394f196
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_svm_devmem.c
@@ -0,0 +1,400 @@ 
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/mm_types.h>
+#include <linux/sched/mm.h>
+
+#include "i915_svm.h"
+#include "intel_memory_region.h"
+
+struct i915_devmem_migrate {
+	struct drm_i915_private *i915;
+	struct migrate_vma *args;
+
+	enum intel_region_id src_id;
+	enum intel_region_id dst_id;
+	u64 npages;
+};
+
+struct i915_devmem {
+	struct drm_i915_private *i915;
+	struct dev_pagemap pagemap;
+	unsigned long pfn_first;
+	unsigned long pfn_last;
+};
+
+static inline bool
+i915_dmem_page(struct drm_i915_private *dev_priv, struct page *page)
+{
+	if (!is_device_private_page(page))
+		return false;
+
+	return true;
+}
+
+int i915_dmem_convert_pfn(struct drm_i915_private *dev_priv,
+			  struct hmm_range *range)
+{
+	unsigned long i, npages;
+	int regions = 0;
+
+	npages = (range->end - range->start) >> PAGE_SHIFT;
+	for (i = 0; i < npages; ++i) {
+		struct i915_buddy_block *block;
+		struct intel_memory_region *mem;
+		struct page *page;
+		u64 addr;
+
+		page = hmm_device_entry_to_page(range, range->pfns[i]);
+		if (!page)
+			continue;
+
+		if (!(range->pfns[i] & range->flags[HMM_PFN_DEVICE_PRIVATE])) {
+			regions |= REGION_SMEM;
+			continue;
+		}
+
+		if (!i915_dmem_page(dev_priv, page)) {
+			WARN(1, "Some unknown device memory !\n");
+			range->pfns[i] = 0;
+			continue;
+		}
+
+		regions |= REGION_LMEM;
+		block = page->zone_device_data;
+		mem = block->private;
+		addr = mem->region.start +
+		       i915_buddy_block_offset(block);
+		addr += (page_to_pfn(page) - block->pfn_first) << PAGE_SHIFT;
+
+		range->pfns[i] &= ~range->flags[HMM_PFN_DEVICE_PRIVATE];
+		range->pfns[i] &= ((1UL << range->pfn_shift) - 1);
+		range->pfns[i] |= (addr >> PAGE_SHIFT) << range->pfn_shift;
+	}
+
+	return regions;
+}
+
+static int
+i915_devmem_page_alloc_locked(struct intel_memory_region *mem,
+			      unsigned long npages,
+			      struct list_head *blocks)
+{
+	unsigned long size = ALIGN((npages * PAGE_SIZE), mem->mm.chunk_size);
+	struct i915_buddy_block *block;
+	int ret;
+
+	INIT_LIST_HEAD(blocks);
+	ret = __intel_memory_region_get_pages_buddy(mem, size, 0, blocks);
+	if (unlikely(ret))
+		goto alloc_failed;
+
+	list_for_each_entry(block, blocks, link) {
+		block->pfn_first = mem->devmem->pfn_first;
+		block->pfn_first += i915_buddy_block_offset(block) /
+				    PAGE_SIZE;
+		bitmap_zero(block->bitmap, I915_BUDDY_MAX_PAGES);
+		DRM_DEBUG_DRIVER("%s pfn_first 0x%lx off 0x%llx size 0x%llx\n",
+				 "Allocated block", block->pfn_first,
+				 i915_buddy_block_offset(block),
+				 i915_buddy_block_size(&mem->mm, block));
+	}
+
+alloc_failed:
+	return ret;
+}
+
+static struct page *
+i915_devmem_page_get_locked(struct intel_memory_region *mem,
+			    struct list_head *blocks)
+{
+	struct i915_buddy_block *block, *on;
+
+	list_for_each_entry_safe(block, on, blocks, link) {
+		unsigned long weight, max;
+		unsigned long i, pfn;
+		struct page *page;
+
+		max = i915_buddy_block_size(&mem->mm, block) / PAGE_SIZE;
+		i = find_first_zero_bit(block->bitmap, max);
+		if (unlikely(i == max)) {
+			WARN(1, "Getting a page should have never failed\n");
+			break;
+		}
+
+		set_bit(i, block->bitmap);
+		pfn = block->pfn_first + i;
+		page = pfn_to_page(pfn);
+		get_page(page);
+		lock_page(page);
+		page->zone_device_data = block;
+		weight = bitmap_weight(block->bitmap, max);
+		if (weight == max)
+			list_del_init(&block->link);
+		DRM_DEBUG_DRIVER("%s pfn 0x%lx block weight 0x%lx\n",
+				 "Allocated page", pfn, weight);
+		return page;
+	}
+	return NULL;
+}
+
+static void
+i915_devmem_page_free_locked(struct drm_i915_private *dev_priv,
+			     struct page *page)
+{
+	unlock_page(page);
+	put_page(page);
+}
+
+static int
+i915_devmem_migrate_alloc_and_copy(struct i915_devmem_migrate *migrate)
+{
+	struct drm_i915_private *i915 = migrate->i915;
+	struct migrate_vma *args = migrate->args;
+	struct intel_memory_region *mem;
+	struct list_head blocks = {0};
+	unsigned long i, npages, cnt;
+	struct page *page;
+	int ret;
+
+	npages = (args->end - args->start) >> PAGE_SHIFT;
+	DRM_DEBUG_DRIVER("start 0x%lx npages %ld\n", args->start, npages);
+
+	/* Check source pages */
+	for (i = 0, cnt = 0; i < npages; i++) {
+		args->dst[i] = 0;
+		page = migrate_pfn_to_page(args->src[i]);
+		if (unlikely(!page || !(args->src[i] & MIGRATE_PFN_MIGRATE)))
+			continue;
+
+		args->dst[i] = MIGRATE_PFN_VALID;
+		cnt++;
+	}
+
+	if (!cnt) {
+		ret = -ENOMEM;
+		goto migrate_out;
+	}
+
+	mem = i915->mm.regions[migrate->dst_id];
+	ret = i915_devmem_page_alloc_locked(mem, cnt, &blocks);
+	if (unlikely(ret))
+		goto migrate_out;
+
+	/* Allocate device memory */
+	for (i = 0, cnt = 0; i < npages; i++) {
+		if (!args->dst[i])
+			continue;
+
+		page = i915_devmem_page_get_locked(mem, &blocks);
+		if (unlikely(!page)) {
+			WARN(1, "Failed to get dst page\n");
+			args->dst[i] = 0;
+			continue;
+		}
+
+		cnt++;
+		args->dst[i] = migrate_pfn(page_to_pfn(page)) |
+			       MIGRATE_PFN_LOCKED;
+	}
+
+	if (!cnt) {
+		ret = -ENOMEM;
+		goto migrate_out;
+	}
+
+	/* Copy the pages */
+	migrate->npages = npages;
+migrate_out:
+	if (unlikely(ret)) {
+		for (i = 0; i < npages; i++) {
+			if (args->dst[i] & MIGRATE_PFN_LOCKED) {
+				page = migrate_pfn_to_page(args->dst[i]);
+				i915_devmem_page_free_locked(i915, page);
+			}
+			args->dst[i] = 0;
+		}
+	}
+
+	return ret;
+}
+
+void i915_devmem_migrate_finalize_and_map(struct i915_devmem_migrate *migrate)
+{
+	DRM_DEBUG_DRIVER("npages %lld\n", migrate->npages);
+}
+
+static void i915_devmem_migrate_chunk(struct i915_devmem_migrate *migrate)
+{
+	int ret;
+
+	ret = i915_devmem_migrate_alloc_and_copy(migrate);
+	if (!ret) {
+		migrate_vma_pages(migrate->args);
+		i915_devmem_migrate_finalize_and_map(migrate);
+	}
+	migrate_vma_finalize(migrate->args);
+}
+
+int i915_devmem_migrate_vma(struct intel_memory_region *mem,
+			    struct vm_area_struct *vma,
+			    unsigned long start,
+			    unsigned long end)
+{
+	unsigned long npages = (end - start) >> PAGE_SHIFT;
+	unsigned long max = min_t(unsigned long, I915_BUDDY_MAX_PAGES, npages);
+	struct i915_devmem_migrate migrate = {0};
+	struct migrate_vma args = {
+		.vma		= vma,
+		.start		= start,
+	};
+	unsigned long c, i;
+	int ret = 0;
+
+	/* XXX: Opportunistically migrate additional pages? */
+	DRM_DEBUG_DRIVER("start 0x%lx end 0x%lx\n", start, end);
+	args.src = kcalloc(max, sizeof(args.src), GFP_KERNEL);
+	if (unlikely(!args.src))
+		return -ENOMEM;
+
+	args.dst = kcalloc(max, sizeof(args.dst), GFP_KERNEL);
+	if (unlikely(!args.dst)) {
+		kfree(args.src);
+		return -ENOMEM;
+	}
+
+	/* XXX: Support migrating from LMEM to SMEM */
+	migrate.args = &args;
+	migrate.i915 = mem->i915;
+	migrate.src_id = INTEL_REGION_SMEM;
+	migrate.dst_id = MEMORY_TYPE_FROM_REGION(mem->id);
+	for (i = 0; i < npages; i += c) {
+		c = min_t(unsigned long, I915_BUDDY_MAX_PAGES, npages);
+		args.end = start + (c << PAGE_SHIFT);
+		ret = migrate_vma_setup(&args);
+		if (unlikely(ret))
+			goto migrate_done;
+		if (args.cpages)
+			i915_devmem_migrate_chunk(&migrate);
+		args.start = args.end;
+	}
+migrate_done:
+	kfree(args.dst);
+	kfree(args.src);
+	return ret;
+}
+
+static vm_fault_t i915_devmem_migrate_to_ram(struct vm_fault *vmf)
+{
+	return VM_FAULT_SIGBUS;
+}
+
+static void i915_devmem_page_free(struct page *page)
+{
+	struct i915_buddy_block *block = page->zone_device_data;
+	struct intel_memory_region *mem = block->private;
+	unsigned long i, max, weight;
+
+	max = i915_buddy_block_size(&mem->mm, block) / PAGE_SIZE;
+	i = page_to_pfn(page) - block->pfn_first;
+	clear_bit(i, block->bitmap);
+	weight = bitmap_weight(block->bitmap, max);
+	DRM_DEBUG_DRIVER("%s pfn 0x%lx block weight 0x%lx\n",
+			 "Freeing page", page_to_pfn(page), weight);
+	if (!weight) {
+		DRM_DEBUG_DRIVER("%s pfn_first 0x%lx off 0x%llx size 0x%llx\n",
+				 "Freeing block", block->pfn_first,
+				 i915_buddy_block_offset(block),
+				 i915_buddy_block_size(&mem->mm, block));
+		__intel_memory_region_put_block_buddy(block);
+	}
+}
+
+static const struct dev_pagemap_ops i915_devmem_pagemap_ops = {
+	.page_free = i915_devmem_page_free,
+	.migrate_to_ram = i915_devmem_migrate_to_ram,
+};
+
+struct i915_devmem *i915_svm_devmem_add(struct drm_i915_private *i915, u64 size)
+{
+	struct device *dev = &i915->drm.pdev->dev;
+	struct i915_devmem *devmem;
+	struct resource *res;
+
+	devmem = kzalloc(sizeof(*devmem), GFP_KERNEL);
+	if (!devmem)
+		return NULL;
+
+	devmem->i915 = i915;
+	res = devm_request_free_mem_region(dev, &iomem_resource, size);
+	if (IS_ERR(res))
+		goto out_free;
+
+	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
+	devmem->pagemap.res = *res;
+	devmem->pagemap.ops = &i915_devmem_pagemap_ops;
+	if (IS_ERR(devm_memremap_pages(dev, &devmem->pagemap)))
+		goto out_free;
+
+	devmem->pfn_first = res->start >> PAGE_SHIFT;
+	devmem->pfn_last = res->end >> PAGE_SHIFT;
+	return devmem;
+out_free:
+	kfree(devmem);
+	return NULL;
+}
+
+void i915_svm_devmem_remove(struct i915_devmem *devmem)
+{
+	/* XXX: Is it the right way to release? */
+	release_resource(&devmem->pagemap.res);
+	kfree(devmem);
+}
+
+int i915_gem_vm_prefetch_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv)
+{
+	struct drm_i915_private *i915 = to_i915(dev);
+	struct drm_i915_gem_vm_prefetch *args = data;
+	unsigned long addr, end, size = args->length;
+	struct intel_memory_region *mem;
+	enum intel_region_id id;
+	struct mm_struct *mm;
+
+	if (args->type != I915_GEM_VM_PREFETCH_SVM_BUFFER)
+		return -EINVAL;
+
+	DRM_DEBUG_DRIVER("start 0x%llx length 0x%llx region 0x%x\n",
+			 args->start, args->length, args->region);
+	id = __region_id(args->region);
+	if ((MEMORY_TYPE_FROM_REGION(args->region) != INTEL_MEMORY_LOCAL) ||
+	    id == INTEL_REGION_UNKNOWN)
+		return -EINVAL;
+
+	mem = i915->mm.regions[id];
+
+	mm = get_task_mm(current);
+	down_read(&mm->mmap_sem);
+
+	for (addr = args->start, end = args->start + size; addr < end;) {
+		struct vm_area_struct *vma;
+		unsigned long next;
+
+		vma = find_vma_intersection(mm, addr, end);
+		if (!vma)
+			break;
+
+		addr &= PAGE_MASK;
+		next = min(vma->vm_end, end);
+		next = round_up(next, PAGE_SIZE);
+		/* This is a best effort so we ignore errors */
+		i915_devmem_migrate_vma(mem, vma, addr, next);
+		addr = next;
+	}
+
+	up_read(&mm->mmap_sem);
+	mmput(mm);
+	return 0;
+}
diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h
index e3e8ab946d78..4c9dab6bca83 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.h
+++ b/drivers/gpu/drm/i915/intel_memory_region.h
@@ -56,6 +56,19 @@  enum intel_region_id {
  */
 extern const u32 intel_region_map[];
 
+static inline enum intel_region_id
+__region_id(u32 region)
+{
+	enum intel_region_id id;
+
+	for (id = 0; id < INTEL_REGION_UNKNOWN; ++id) {
+		if (intel_region_map[id] == region)
+			return id;
+	}
+
+	return INTEL_REGION_UNKNOWN;
+}
+
 struct intel_memory_region_ops {
 	unsigned int flags;
 
@@ -71,6 +84,7 @@  struct intel_memory_region_ops {
 struct intel_memory_region {
 	struct drm_i915_private *i915;
 
+	struct i915_devmem *devmem;
 	const struct intel_memory_region_ops *ops;
 
 	struct io_mapping iomap;
diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c
index eddb392917aa..2ba4a4720eb6 100644
--- a/drivers/gpu/drm/i915/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/intel_region_lmem.c
@@ -4,6 +4,7 @@ 
  */
 
 #include "i915_drv.h"
+#include "i915_svm.h"
 #include "intel_memory_region.h"
 #include "gem/i915_gem_lmem.h"
 #include "gem/i915_gem_region.h"
@@ -66,6 +67,7 @@  static void release_fake_lmem_bar(struct intel_memory_region *mem)
 static void
 region_lmem_release(struct intel_memory_region *mem)
 {
+	i915_svm_devmem_remove(mem->devmem);
 	release_fake_lmem_bar(mem);
 	io_mapping_fini(&mem->iomap);
 	intel_memory_region_release_buddy(mem);
@@ -122,6 +124,14 @@  intel_setup_fake_lmem(struct drm_i915_private *i915)
 					 PAGE_SIZE,
 					 io_start,
 					 &intel_region_lmem_ops);
+	if (!IS_ERR(mem)) {
+		mem->devmem = i915_svm_devmem_add(i915, mappable_end);
+		if (IS_ERR(mem->devmem)) {
+			intel_memory_region_put(mem);
+			mem = ERR_CAST(mem->devmem);
+		}
+	}
+
 	if (!IS_ERR(mem)) {
 		DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region);
 		DRM_INFO("Intel graphics fake LMEM IO start: %llx\n",
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 3164045446d8..f49e29716460 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -361,6 +361,7 @@  typedef struct _drm_i915_sarea {
 #define DRM_I915_GEM_VM_DESTROY		0x3b
 #define DRM_I915_GEM_OBJECT_SETPARAM	DRM_I915_GEM_CONTEXT_SETPARAM
 #define DRM_I915_GEM_VM_BIND		0x3c
+#define DRM_I915_GEM_VM_PREFETCH	0x3d
 /* Must be kept compact -- no holes */
 
 #define DRM_IOCTL_I915_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t)
@@ -426,6 +427,7 @@  typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_GEM_VM_DESTROY	DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_VM_DESTROY, struct drm_i915_gem_vm_control)
 #define DRM_IOCTL_I915_GEM_OBJECT_SETPARAM	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_OBJECT_SETPARAM, struct drm_i915_gem_object_param)
 #define DRM_IOCTL_I915_GEM_VM_BIND		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_BIND, struct drm_i915_gem_vm_bind)
+#define DRM_IOCTL_I915_GEM_VM_PREFETCH		DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_VM_PREFETCH, struct drm_i915_gem_vm_prefetch)
 
 /* Allow drivers to submit batchbuffers directly to hardware, relying
  * on the security mechanisms provided by hardware.
@@ -2334,6 +2336,26 @@  struct drm_i915_gem_vm_bind {
 #define I915_GEM_VM_BIND_READONLY    (1 << 1)
 };
 
+/**
+ * struct drm_i915_gem_vm_prefetch
+ *
+ * Prefetch an address range to a memory region.
+ */
+struct drm_i915_gem_vm_prefetch {
+	/** Type of memory to prefetch **/
+	__u32 type;
+#define I915_GEM_VM_PREFETCH_SVM_BUFFER   0
+
+	/** Memory region to prefetch to **/
+	__u32 region;
+
+	/** VA start to prefetch **/
+	__u64 start;
+
+	/** VA length to prefetch **/
+	__u64 length;
+};
+
 #if defined(__cplusplus)
 }
 #endif
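
For reference, userspace would drive the new prefetch ioctl roughly as
below. This is a hedged sketch: the wrapper name and the way the LMEM
region id is obtained are assumptions; the struct and ioctl number come
from the patch above.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>

	/*
	 * Prefetch a heap/SVM buffer into device-local memory.
	 * 'lmem_region' must be a region id that the driver maps to
	 * INTEL_MEMORY_LOCAL (how userspace discovers it is out of
	 * scope here).
	 */
	static int prefetch_to_lmem(int drm_fd, void *ptr, uint64_t len,
				    uint32_t lmem_region)
	{
		struct drm_i915_gem_vm_prefetch p;

		memset(&p, 0, sizeof(p));
		p.type = I915_GEM_VM_PREFETCH_SVM_BUFFER;
		p.region = lmem_region;
		p.start = (uint64_t)(uintptr_t)ptr;
		p.length = len;

		/* best effort: per-VMA migration failures are ignored */
		return ioctl(drm_fd, DRM_IOCTL_I915_GEM_VM_PREFETCH, &p);
	}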