diff mbox series

IB/core: Ensure an invalidate_range callback on ODP MR

Message ID 20190313190559.8068-1-ira.weiny@intel.com (mailing list archive)
State Mainlined
Commit 4ae27444100f54e6db3a046f086ba4e70e1ac22b
Delegated to: Jason Gunthorpe
Headers show
Series IB/core: Ensure an invalidate_range callback on ODP MR | expand

Commit Message

Ira Weiny March 13, 2019, 7:05 p.m. UTC
From: Ira Weiny <ira.weiny@intel.com>

No device supports ODP MR without an invalidate_range callback.

Warn on any device which attempts to support ODP without
supplying this callback.

Then we can remove the checks for the callback within the code.

This stems from the discussion

https://www.spinics.net/lists/linux-rdma/msg76460.html

...which concluded this code was no longer necessary.

CC: Artemy Kovalyov <artemyko@mellanox.com>
Acked-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Haggai Eran <haggaie@mellanox.com>
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
---
 drivers/infiniband/core/umem.c     |  5 +++++
 drivers/infiniband/core/umem_odp.c | 13 +++----------
 2 files changed, 8 insertions(+), 10 deletions(-)

Comments

Jason Gunthorpe March 27, 2019, 12:54 p.m. UTC | #1
On Wed, Mar 13, 2019 at 12:05:59PM -0700, ira.weiny@intel.com wrote:
> From: Ira Weiny <ira.weiny@intel.com>
> 
> No device supports ODP MR without an invalidate_range callback.
> 
> Warn on any device which attempts to support ODP without
> supplying this callback.
> 
> Then we can remove the checks for the callback within the code.
> 
> This stems from the discussion
> 
> https://www.spinics.net/lists/linux-rdma/msg76460.html
> 
> ...which concluded this code was no longer necessary.
> 
> CC: Artemy Kovalyov <artemyko@mellanox.com>
> Acked-by: John Hubbard <jhubbard@nvidia.com>
> Reviewed-by: Haggai Eran <haggaie@mellanox.com>
> Signed-off-by: Ira Weiny <ira.weiny@intel.com>
> ---
>  drivers/infiniband/core/umem.c     |  5 +++++
>  drivers/infiniband/core/umem_odp.c | 13 +++----------
>  2 files changed, 8 insertions(+), 10 deletions(-)

Applied to for-next, thanks

Jason
diff mbox series

Patch

diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index fe5551562dbc..89a7d57f9fa5 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -138,6 +138,11 @@  struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 	mmgrab(mm);
 
 	if (access & IB_ACCESS_ON_DEMAND) {
+		if (WARN_ON_ONCE(!context->invalidate_range)) {
+			ret = -EINVAL;
+			goto umem_kfree;
+		}
+
 		ret = ib_umem_odp_get(to_ib_umem_odp(umem), access);
 		if (ret)
 			goto umem_kfree;
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index e6ec79ad9cc8..6f8c36fcda78 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -241,7 +241,7 @@  static struct ib_ucontext_per_mm *alloc_per_mm(struct ib_ucontext *ctx,
 	per_mm->mm = mm;
 	per_mm->umem_tree = RB_ROOT_CACHED;
 	init_rwsem(&per_mm->umem_rwsem);
-	per_mm->active = ctx->invalidate_range;
+	per_mm->active = true;
 
 	rcu_read_lock();
 	per_mm->tgid = get_task_pid(current->group_leader, PIDTYPE_PID);
@@ -503,7 +503,6 @@  static int ib_umem_odp_map_dma_single_page(
 	struct ib_umem *umem = &umem_odp->umem;
 	struct ib_device *dev = umem->context->device;
 	dma_addr_t dma_addr;
-	int stored_page = 0;
 	int remove_existing_mapping = 0;
 	int ret = 0;
 
@@ -528,7 +527,6 @@  static int ib_umem_odp_map_dma_single_page(
 		umem_odp->dma_list[page_index] = dma_addr | access_mask;
 		umem_odp->page_list[page_index] = page;
 		umem->npages++;
-		stored_page = 1;
 	} else if (umem_odp->page_list[page_index] == page) {
 		umem_odp->dma_list[page_index] |= access_mask;
 	} else {
@@ -540,11 +538,9 @@  static int ib_umem_odp_map_dma_single_page(
 	}
 
 out:
-	/* On Demand Paging - avoid pinning the page */
-	if (umem->context->invalidate_range || !stored_page)
-		put_page(page);
+	put_page(page);
 
-	if (remove_existing_mapping && umem->context->invalidate_range) {
+	if (remove_existing_mapping) {
 		ib_umem_notifier_start_account(umem_odp);
 		umem->context->invalidate_range(
 			umem_odp,
@@ -754,9 +750,6 @@  void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
 				 */
 				set_page_dirty(head_page);
 			}
-			/* on demand pinning support */
-			if (!umem->context->invalidate_range)
-				put_page(page);
 			umem_odp->page_list[idx] = NULL;
 			umem_odp->dma_list[idx] = 0;
 			umem->npages--;