@@ -54,7 +54,8 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
page = sg_page_iter_page(&sg_iter);
- put_user_pages_dirty_lock(&page, 1, umem->writable && dirty);
+ vaddr_unpin_pages_dirty_lock(&page, 1, &umem->vaddr_pin,
+ umem->writable && dirty);
}
sg_free_table(&umem->sg_head);
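For readers tracking the conversion: the hunk above (apparently from drivers/infiniband/core/umem.c) swaps put_user_pages_dirty_lock() for a vaddr_pin-aware counterpart. The prototype below is a sketch inferred from the call site, not quoted from an upstream header; the extra vaddr_pin argument tells the core which pin the release should be accounted against.

/*
 * Assumed shape of the replacement call, inferred from its use above.
 * The trailing bool keeps the old put_user_pages_dirty_lock()
 * "make_dirty" semantics; the vaddr_pin identifies who took the pin.
 */
void vaddr_unpin_pages_dirty_lock(struct page **pages,
				  unsigned long nr_pages,
				  struct vaddr_pin *vaddr_pin,
				  bool make_dirty);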
@@ -243,8 +244,15 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
umem->length = size;
umem->address = addr;
umem->writable = ib_access_writable(access);
- umem->owning_mm = mm = current->mm;
- mmgrab(mm);
+ umem->vaddr_pin.mm = mm = current->mm;
+ mmgrab(umem->vaddr_pin.mm);
+
+ /* No need to get a reference to the core file object here. The key is
+ * that the sys_file reference is held by the ufile. Any duplication of
+ * sys_file by the core will keep those references active until all
+ * those contexts are closed out, no matter which process holds them open.
+ */
+ umem->vaddr_pin.f_owner = context->ufile->sys_file;
if (access & IB_ACCESS_ON_DEMAND) {
if (WARN_ON_ONCE(!context->invalidate_range)) {
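From the assignments above, struct vaddr_pin evidently carries at least the pinning mm and an owning file. A minimal sketch of the assumed layout (the real definition belongs to the vaddr_pin series, so field order and any further members are guesses):

/*
 * Assumed pin-tracking state, inferred from the umem->vaddr_pin.mm and
 * umem->vaddr_pin.f_owner uses in this patch.
 */
struct vaddr_pin {
	struct file *f_owner;	/* file the pin is charged against */
	struct mm_struct *mm;	/* mm the pages are pinned in */
};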
@@ -292,11 +300,11 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
while (npages) {
down_read(&mm->mmap_sem);
- ret = get_user_pages(cur_base,
+ ret = vaddr_pin_pages(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
- gup_flags | FOLL_LONGTERM,
- page_list, NULL);
+ gup_flags,
+ page_list, &umem->vaddr_pin);
if (ret < 0) {
up_read(&mm->mmap_sem);
goto umem_release;
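The pin side mirrors the unpin shown earlier. Note that the explicit FOLL_LONGTERM is dropped from gup_flags, which suggests vaddr_pin_pages() applies long-term-pin semantics internally. Again, the prototype is inferred from the converted call site rather than quoted from a header:

/*
 * Assumed prototype; the return convention presumably mirrors
 * get_user_pages(): number of pages pinned, or a negative errno.
 */
long vaddr_pin_pages(unsigned long addr, unsigned long nr_pages,
		     unsigned int gup_flags, struct page **pages,
		     struct vaddr_pin *vaddr_pin);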
@@ -336,7 +344,7 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
free_page((unsigned long) page_list);
umem_kfree:
if (ret) {
- mmdrop(umem->owning_mm);
+ mmdrop(umem->vaddr_pin.mm);
kfree(umem);
}
return ret ? ERR_PTR(ret) : umem;
@@ -345,7 +353,7 @@ EXPORT_SYMBOL(ib_umem_get);
static void __ib_umem_release_tail(struct ib_umem *umem)
{
- mmdrop(umem->owning_mm);
+ mmdrop(umem->vaddr_pin.mm);
if (umem->is_odp)
kfree(to_ib_umem_odp(umem));
else
@@ -369,7 +377,7 @@ void ib_umem_release(struct ib_umem *umem)
__ib_umem_release(umem->context->device, umem, 1);
- atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
+ atomic64_sub(ib_umem_num_pages(umem), &umem->vaddr_pin.mm->pinned_vm);
__ib_umem_release_tail(umem);
}
EXPORT_SYMBOL(ib_umem_release);
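The mm lifetime handling is unchanged by the rename: ib_umem_get() takes a reference with mmgrab(), and both the error path and __ib_umem_release_tail() balance it with mmdrop(). mmgrab() keeps only the mm_struct itself alive (so the pointer used for the pinned_vm accounting above stays valid); it does not pin the address space. A condensed, hypothetical sketch of the pairing:

/* Hypothetical condensation of the grab/drop pairing in the hunks above. */
static void example_umem_mm_lifetime(struct ib_umem *umem)
{
	umem->vaddr_pin.mm = current->mm;
	mmgrab(umem->vaddr_pin.mm);	/* keep the mm_struct allocated */

	/* ... pin pages, use the umem, account pinned_vm ... */

	mmdrop(umem->vaddr_pin.mm);	/* balance the mmgrab() */
}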
@@ -278,11 +278,11 @@ static int get_per_mm(struct ib_umem_odp *umem_odp)
*/
mutex_lock(&ctx->per_mm_list_lock);
list_for_each_entry(per_mm, &ctx->per_mm_list, ucontext_list) {
- if (per_mm->mm == umem_odp->umem.owning_mm)
+ if (per_mm->mm == umem_odp->umem.vaddr_pin.mm)
goto found;
}
- per_mm = alloc_per_mm(ctx, umem_odp->umem.owning_mm);
+ per_mm = alloc_per_mm(ctx, umem_odp->umem.vaddr_pin.mm);
if (IS_ERR(per_mm)) {
mutex_unlock(&ctx->per_mm_list_lock);
return PTR_ERR(per_mm);
@@ -355,8 +355,8 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root,
umem->writable = root->umem.writable;
umem->is_odp = 1;
odp_data->per_mm = per_mm;
- umem->owning_mm = per_mm->mm;
- mmgrab(umem->owning_mm);
+ umem->vaddr_pin.mm = per_mm->mm;
+ mmgrab(umem->vaddr_pin.mm);
mutex_init(&odp_data->umem_mutex);
init_completion(&odp_data->notifier_completion);
@@ -389,7 +389,7 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_umem_odp *root,
out_page_list:
vfree(odp_data->page_list);
out_odp_data:
- mmdrop(umem->owning_mm);
+ mmdrop(umem->vaddr_pin.mm);
kfree(odp_data);
return ERR_PTR(ret);
}
@@ -399,10 +399,10 @@ int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
struct ib_umem *umem = &umem_odp->umem;
/*
- * NOTE: This must called in a process context where umem->owning_mm
+ * NOTE: This must be called in a process context where umem->vaddr_pin.mm
* == current->mm
*/
- struct mm_struct *mm = umem->owning_mm;
+ struct mm_struct *mm = umem->vaddr_pin.mm;
int ret_val;
umem_odp->page_shift = PAGE_SHIFT;
@@ -581,7 +581,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
unsigned long current_seq)
{
struct task_struct *owning_process = NULL;
- struct mm_struct *owning_mm = umem_odp->umem.owning_mm;
+ struct mm_struct *owning_mm = umem_odp->umem.vaddr_pin.mm;
struct page **local_page_list = NULL;
u64 page_mask, off;
int j, k, ret = 0, start_idx, npages = 0;
@@ -43,7 +43,6 @@ struct ib_umem_odp;
struct ib_umem {
struct ib_ucontext *context;
- struct mm_struct *owning_mm;
size_t length;
unsigned long address;
u32 writable : 1;
@@ -52,6 +51,7 @@ struct ib_umem {
struct sg_table sg_head;
int nmap;
unsigned int sg_nents;
+ struct vaddr_pin vaddr_pin;
};
/* Returns the offset of the umem start relative to the first page. */
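Putting the pieces together, a minimal end-to-end sketch of the API as this patch assumes it (hypothetical driver-style code, error handling trimmed):

/*
 * Hypothetical usage of the assumed vaddr_pin API: record the owner,
 * pin under mmap_sem, and later unpin through the same vaddr_pin,
 * dirtying the pages if they may have been written.
 */
static long example_pin_unpin(unsigned long uaddr, unsigned long npages,
			      struct page **pages, struct file *f_owner)
{
	struct vaddr_pin pin = {
		.mm = current->mm,
		.f_owner = f_owner,
	};
	long pinned;

	down_read(&current->mm->mmap_sem);
	pinned = vaddr_pin_pages(uaddr, npages, FOLL_WRITE, pages, &pin);
	up_read(&current->mm->mmap_sem);
	if (pinned < 0)
		return pinned;

	/* ... hand the pages to the device ... */

	vaddr_unpin_pages_dirty_lock(pages, pinned, &pin, true);
	return 0;
}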