
[RFT,v2,1/4] vdpa: try to fix the potential crash due to misusing __GFP_NOFAIL

Message ID: 20240731000155.109583-2-21cnbao@gmail.com
State: New
Series: mm: clarify nofail memory allocation

Commit Message

Barry Song July 31, 2024, 12:01 a.m. UTC
From: Barry Song <v-songbaohua@oppo.com>

mm doesn't support non-blockable __GFP_NOFAIL allocation, because
__GFP_NOFAIL without direct reclamation may just result in a busy
loop within non-sleepable contexts.

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                                struct alloc_context *ac)
{
        ...
        /*
         * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
         * we always retry
         */
        if (gfp_mask & __GFP_NOFAIL) {
                /*
                 * All existing users of the __GFP_NOFAIL are blockable, so warn
                 * of any new users that actually require GFP_NOWAIT
                 */
                if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
                        goto fail;
                ...
        }
        ...
fail:
        warn_alloc(gfp_mask, ac->nodemask,
                        "page allocation failure: order:%u", order);
got_pg:
        return page;
}

Let's move the memory allocation out of the atomic context and use
the normal sleepable context to get pages.

[RFT]: This has only been compile-tested; I'd prefer that the VDPA maintainers
handle it.

Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: "Eugenio Pérez" <eperezma@redhat.com>
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
---
 drivers/vdpa/vdpa_user/iova_domain.c | 31 +++++++++++++++++++++++-----
 drivers/vdpa/vdpa_user/iova_domain.h |  5 ++++-
 drivers/vdpa/vdpa_user/vduse_dev.c   |  4 +++-
 3 files changed, 33 insertions(+), 7 deletions(-)
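
In outline, the patch applies the usual preallocate-then-consume pattern:
perform every allocation that may sleep before taking the non-sleepable
lock, and only hand the preallocated pages over while the lock is held.
A minimal sketch of that pattern, with hypothetical helper names rather
than the actual driver code:

struct page **prealloc_pages(unsigned long count)
{
	struct page **pages;
	unsigned long i;

	/*
	 * Sleepable context: __GFP_NOFAIL is only valid together with
	 * direct reclaim, i.e. GFP_KERNEL rather than GFP_ATOMIC.
	 */
	pages = kmalloc_array(count, sizeof(*pages),
			      GFP_KERNEL | __GFP_NOFAIL);
	for (i = 0; i < count; i++)
		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
	return pages;
}

void consume_pages(spinlock_t *lock, struct page **pages,
		   unsigned long count)
{
	spin_lock(lock);	/* atomic from here on: no allocations */
	/* ... transfer ownership of each pages[i] to the structure ... */
	spin_unlock(lock);
	kfree(pages);
}

The cost is that the pages are allocated even on paths that may turn out
not to need them, which is what the review below goes on to discuss.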

Comments

Jason Wang July 31, 2024, 3:09 a.m. UTC | #1
On Wed, Jul 31, 2024 at 8:03 AM Barry Song <21cnbao@gmail.com> wrote:
>
> [...]
>
> diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
> index 791d38d6284c..9318f059a8b5 100644
> --- a/drivers/vdpa/vdpa_user/iova_domain.c
> +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> @@ -283,7 +283,23 @@ int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
>         return ret;
>  }
>
> -void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
> +struct page **vduse_domain_alloc_pages_to_remove_bounce(struct vduse_iova_domain *domain)
> +{
> +       struct page **pages;
> +       unsigned long count, i;
> +
> +       if (!domain->user_bounce_pages)
> +               return NULL;
> +
> +       count = domain->bounce_size >> PAGE_SHIFT;
> +       pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
> +       for (i = 0; i < count; i++)
> +               pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
> +
> +       return pages;
> +}
> +
> +void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain, struct page **pages)
>  {
>         struct vduse_bounce_map *map;
>         unsigned long i, count;
> @@ -294,15 +310,16 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
>
>         count = domain->bounce_size >> PAGE_SHIFT;
>         for (i = 0; i < count; i++) {
> -               struct page *page = NULL;
> +               struct page *page = pages[i];
>
>                 map = &domain->bounce_maps[i];
> -               if (WARN_ON(!map->bounce_page))
> +               if (WARN_ON(!map->bounce_page)) {
> +                       put_page(page);
>                         continue;
> +               }
>
>                 /* Copy user page to kernel page if it's in use */
>                 if (map->orig_phys != INVALID_PHYS_ADDR) {
> -                       page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
>                         memcpy_from_page(page_address(page),
>                                          map->bounce_page, 0, PAGE_SIZE);
>                 }
> @@ -310,6 +327,7 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
>                 map->bounce_page = page;
>         }
>         domain->user_bounce_pages = false;
> +       kfree(pages);
>  out:
>         write_unlock(&domain->bounce_lock);
>  }
> @@ -543,10 +561,13 @@ static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
>  static int vduse_domain_release(struct inode *inode, struct file *file)
>  {
>         struct vduse_iova_domain *domain = file->private_data;
> +       struct page **pages;
> +
> +       pages = vduse_domain_alloc_pages_to_remove_bounce(domain);
>
>         spin_lock(&domain->iotlb_lock);
>         vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
> -       vduse_domain_remove_user_bounce_pages(domain);
> +       vduse_domain_remove_user_bounce_pages(domain, pages);
>         vduse_domain_free_kernel_bounce_pages(domain);
>         spin_unlock(&domain->iotlb_lock);
>         put_iova_domain(&domain->stream_iovad);
> diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
> index f92f22a7267d..17efa5555b3f 100644
> --- a/drivers/vdpa/vdpa_user/iova_domain.h
> +++ b/drivers/vdpa/vdpa_user/iova_domain.h
> @@ -74,7 +74,10 @@ void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
>  int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
>                                        struct page **pages, int count);
>
> -void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
> +void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain,
> +                                          struct page **pages);
> +
> +struct page **vduse_domain_alloc_pages_to_remove_bounce(struct vduse_iova_domain *domain);
>
>  void vduse_domain_destroy(struct vduse_iova_domain *domain);
>
> diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
> index 7ae99691efdf..5d8d5810df57 100644
> --- a/drivers/vdpa/vdpa_user/vduse_dev.c
> +++ b/drivers/vdpa/vdpa_user/vduse_dev.c
> @@ -1030,6 +1030,7 @@ static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
>  static int vduse_dev_dereg_umem(struct vduse_dev *dev,
>                                 u64 iova, u64 size)
>  {
> +       struct page **pages;
>         int ret;
>
>         mutex_lock(&dev->mem_lock);
> @@ -1044,7 +1045,8 @@ static int vduse_dev_dereg_umem(struct vduse_dev *dev,
>         if (dev->umem->iova != iova || size != dev->domain->bounce_size)
>                 goto unlock;
>
> -       vduse_domain_remove_user_bounce_pages(dev->domain);
> +       pages = vduse_domain_alloc_pages_to_remove_bounce(dev->domain);
> +       vduse_domain_remove_user_bounce_pages(dev->domain, pages);
>         unpin_user_pages_dirty_lock(dev->umem->pages,
>                                     dev->umem->npages, true);
>         atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);

Are we missing a kfree(pages); here?

Thanks

Barry Song July 31, 2024, 3:15 a.m. UTC | #2
On Wed, Jul 31, 2024 at 11:10 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Wed, Jul 31, 2024 at 8:03 AM Barry Song <21cnbao@gmail.com> wrote:
> >
> > [...]
> > @@ -1044,7 +1045,8 @@ static int vduse_dev_dereg_umem(struct vduse_dev *dev,
> >         if (dev->umem->iova != iova || size != dev->domain->bounce_size)
> >                 goto unlock;
> >
> > -       vduse_domain_remove_user_bounce_pages(dev->domain);
> > +       pages = vduse_domain_alloc_pages_to_remove_bounce(dev->domain);
> > +       vduse_domain_remove_user_bounce_pages(dev->domain, pages);
> >         unpin_user_pages_dirty_lock(dev->umem->pages,
> >                                     dev->umem->npages, true);
> >         atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);
>
> Are we missing a kfree(pages); here?
No, I've moved it into vduse_domain_remove_user_bounce_pages().

Jason Wang July 31, 2024, 3:58 a.m. UTC | #3
On Wed, Jul 31, 2024 at 11:15 AM Barry Song <21cnbao@gmail.com> wrote:
>
> On Wed, Jul 31, 2024 at 11:10 AM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Wed, Jul 31, 2024 at 8:03 AM Barry Song <21cnbao@gmail.com> wrote:
> > >
> > > [...]
> >
> > Are we missing a kfree(pages); here?
> No, I've moved it into vduse_domain_remove_user_bounce_pages().

Ok, but it seems tricky, e.g. allocated by the caller but freed in the
callee. And I think I missed some important issues in the previous
review: the check of user_bounce_pages must be done under the
bounce_lock, otherwise it might race with umem_reg.

So in the case of release(), we know the device is gone, so there's no
need to allocate pages that will be released soon. So we can pass NULL
as a hint and just assign bounce_page to NULL in
vduse_domain_remove_user_bounce_pages().

And in the case of vduse_dev_dereg_umem(), we need to allocate the
pages without checking user_bounce_pages. Then
vduse_domain_remove_user_bounce_pages() can free the allocated
pages, as well as the pages array, when it hits the following check:

        if (!domain->user_bounce_pages)
                goto out;
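
Something like the following, untested and only meant to illustrate the
idea (the cleanup of unconsumed pages at the end is just one possible
way to structure it):

void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain,
					   struct page **pages)
{
	struct vduse_bounce_map *map;
	unsigned long i, count;

	count = domain->bounce_size >> PAGE_SHIFT;

	write_lock(&domain->bounce_lock);
	/* checked under bounce_lock, so it cannot race with umem_reg */
	if (!domain->user_bounce_pages)
		goto out;

	for (i = 0; i < count; i++) {
		/* pages == NULL from release(): device is gone, no copy */
		struct page *page = pages ? pages[i] : NULL;

		map = &domain->bounce_maps[i];
		/* Copy user page to kernel page if it's in use */
		if (page && map->orig_phys != INVALID_PHYS_ADDR)
			memcpy_from_page(page_address(page),
					 map->bounce_page, 0, PAGE_SIZE);
		map->bounce_page = page;
		if (pages)
			pages[i] = NULL;	/* ownership transferred */
	}
	domain->user_bounce_pages = false;
out:
	write_unlock(&domain->bounce_lock);
	if (pages) {
		/* drop whatever was preallocated but not consumed */
		for (i = 0; i < count; i++)
			if (pages[i])
				put_page(pages[i]);
		kfree(pages);
	}
}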

What do you think?

Thanks

Barry Song July 31, 2024, 4:11 a.m. UTC | #4
On Wed, Jul 31, 2024 at 11:58 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Wed, Jul 31, 2024 at 11:15 AM Barry Song <21cnbao@gmail.com> wrote:
> >
> > On Wed, Jul 31, 2024 at 11:10 AM Jason Wang <jasowang@redhat.com> wrote:
> > >
> > > On Wed, Jul 31, 2024 at 8:03 AM Barry Song <21cnbao@gmail.com> wrote:
> > > >
> > > > [...]
> > >
> > > Are we missing a kfree(pages); here?
> > No, I've moved it into vduse_domain_remove_user_bounce_pages().
>
> Ok, but it seems tricky, e.g. allocated by the caller but freed in the
> callee. And I think I missed some important issues in the previous
> review: the check of user_bounce_pages must be done under the
> bounce_lock, otherwise it might race with umem_reg.
>
> So in the case of release(), we know the device is gone, so there's no
> need to allocate pages that will be released soon. So we can pass NULL
> as a hint and just assign bounce_page to NULL in
> vduse_domain_remove_user_bounce_pages().
>
> And in the case of vduse_dev_dereg_umem(), we need to allocate the
> pages without checking user_bounce_pages. Then
> vduse_domain_remove_user_bounce_pages() can free the allocated
> pages, as well as the pages array, when it hits the following check:
>
>         if (!domain->user_bounce_pages)
>                 goto out;
>
> What do you think?

I am not a vdpa guy, but changing the current logic is another patch.
From the mm perspective, I can only address the __GFP_NOFAIL issue.

I actually prefer you guys handle it directly :-) I'd rather report a BUG
instead. TBH, I know nothing about vdpa.

Thanks
Barry
Jason Wang July 31, 2024, 4:13 a.m. UTC | #5
On Wed, Jul 31, 2024 at 12:12 PM Barry Song <21cnbao@gmail.com> wrote:
>
> [...]
>
> I am not a vdpa guy, but changing the current logic is another patch.
> From the mm perspective, I can only address the __GFP_NOFAIL issue.
>
> I actually prefer you guys handle it directly :-) I'd rather report a BUG
> instead. TBH, I know nothing about vdpa.

Fine, let me post a patch for this (no later than the end of this week).

Thanks

Barry Song July 31, 2024, 5:05 a.m. UTC | #6
On Wed, Jul 31, 2024 at 12:13 PM Jason Wang <jasowang@redhat.com> wrote:
>
> [...]
>
> Fine, let me post a patch for this (no later than the end of this week).
>
Jason,
Thank you very much. Also, Tetsuo reminded me that kmalloc_array() might be
problematic if the count is too large:
 pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);

You might want to consider using vmalloc_array() or kvmalloc_array() instead
when you send a new version.
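
i.e., untested, something along the lines of:

	/*
	 * kvmalloc_array() falls back to vmalloc() for larger sizes, so the
	 * array is not capped by the kmalloc() max order; pair it with
	 * kvfree() instead of kfree(). This assumes the vmalloc fallback
	 * honours __GFP_NOFAIL on the kernels we care about.
	 */
	pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
	...
	kvfree(pages);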


Thanks
Barry
Tetsuo Handa July 31, 2024, 10:20 a.m. UTC | #7
On 2024/07/31 14:05, Barry Song wrote:
> Jason,
> Thank you very much. Also, Tetsuo reminded me that kmalloc_array() might be
> problematic if the count is too large:
>  pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);

If "count" is guaranteed to satisfy count <= 16, this might be tolerable.

Consider a situation where the current thread was chosen as a global OOM victim.
Trying to allocate "count" pages using

	for (i = 0; i < count; i++)
		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);

is not good.

> 
> You might want to consider using vmalloc_array() or kvmalloc_array() instead
> when you send a new version.

There is a limitation at https://elixir.bootlin.com/linux/v6.11-rc1/source/mm/page_alloc.c#L3033
that you must satisfy count <= PAGE_SIZE * 2 / sizeof(*pages) if you use __GFP_NOFAIL.

But as already explained above, allocating 1024 pages (assuming PAGE_SIZE is 4096 and
the pointer size is 8) when the current thread was chosen as an OOM victim is not
recommended. You should implement proper error handling instead of using __GFP_NOFAIL
if count can become large.
Jason Wang Aug. 1, 2024, 2:30 a.m. UTC | #8
On Wed, Jul 31, 2024 at 1:05 PM Barry Song <21cnbao@gmail.com> wrote:
>
> [...]
> > > > > > > +
> > > > > > > +       count = domain->bounce_size >> PAGE_SHIFT;
> > > > > > > +       pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
> > > > > > > +       for (i = 0; i < count; i++)
> > > > > > > +               pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
> > > > > > > +
> > > > > > > +       return pages;
> > > > > > > +}
> > > > > > > +
> > > > > > > +void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain, struct page **pages)
> > > > > > >  {
> > > > > > >         struct vduse_bounce_map *map;
> > > > > > >         unsigned long i, count;
> > > > > > > @@ -294,15 +310,16 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
> > > > > > >
> > > > > > >         count = domain->bounce_size >> PAGE_SHIFT;
> > > > > > >         for (i = 0; i < count; i++) {
> > > > > > > -               struct page *page = NULL;
> > > > > > > +               struct page *page = pages[i];
> > > > > > >
> > > > > > >                 map = &domain->bounce_maps[i];
> > > > > > > -               if (WARN_ON(!map->bounce_page))
> > > > > > > +               if (WARN_ON(!map->bounce_page)) {
> > > > > > > +                       put_page(page);
> > > > > > >                         continue;
> > > > > > > +               }
> > > > > > >
> > > > > > >                 /* Copy user page to kernel page if it's in use */
> > > > > > >                 if (map->orig_phys != INVALID_PHYS_ADDR) {
> > > > > > > -                       page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
> > > > > > >                         memcpy_from_page(page_address(page),
> > > > > > >                                          map->bounce_page, 0, PAGE_SIZE);
> > > > > > >                 }
> > > > > > > @@ -310,6 +327,7 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
> > > > > > >                 map->bounce_page = page;
> > > > > > >         }
> > > > > > >         domain->user_bounce_pages = false;
> > > > > > > +       kfree(pages);
> > > > > > >  out:
> > > > > > >         write_unlock(&domain->bounce_lock);
> > > > > > >  }
> > > > > > > @@ -543,10 +561,13 @@ static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
> > > > > > >  static int vduse_domain_release(struct inode *inode, struct file *file)
> > > > > > >  {
> > > > > > >         struct vduse_iova_domain *domain = file->private_data;
> > > > > > > +       struct page **pages;
> > > > > > > +
> > > > > > > +       pages = vduse_domain_alloc_pages_to_remove_bounce(domain);
> > > > > > >
> > > > > > >         spin_lock(&domain->iotlb_lock);
> > > > > > >         vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
> > > > > > > -       vduse_domain_remove_user_bounce_pages(domain);
> > > > > > > +       vduse_domain_remove_user_bounce_pages(domain, pages);
> > > > > > >         vduse_domain_free_kernel_bounce_pages(domain);
> > > > > > >         spin_unlock(&domain->iotlb_lock);
> > > > > > >         put_iova_domain(&domain->stream_iovad);
> > > > > > > diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
> > > > > > > index f92f22a7267d..17efa5555b3f 100644
> > > > > > > --- a/drivers/vdpa/vdpa_user/iova_domain.h
> > > > > > > +++ b/drivers/vdpa/vdpa_user/iova_domain.h
> > > > > > > @@ -74,7 +74,10 @@ void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
> > > > > > >  int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
> > > > > > >                                        struct page **pages, int count);
> > > > > > >
> > > > > > > -void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
> > > > > > > +void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain,
> > > > > > > +                                          struct page **pages);
> > > > > > > +
> > > > > > > +struct page **vduse_domain_alloc_pages_to_remove_bounce(struct vduse_iova_domain *domain);
> > > > > > >
> > > > > > >  void vduse_domain_destroy(struct vduse_iova_domain *domain);
> > > > > > >
> > > > > > > diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
> > > > > > > index 7ae99691efdf..5d8d5810df57 100644
> > > > > > > --- a/drivers/vdpa/vdpa_user/vduse_dev.c
> > > > > > > +++ b/drivers/vdpa/vdpa_user/vduse_dev.c
> > > > > > > @@ -1030,6 +1030,7 @@ static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
> > > > > > >  static int vduse_dev_dereg_umem(struct vduse_dev *dev,
> > > > > > >                                 u64 iova, u64 size)
> > > > > > >  {
> > > > > > > +       struct page **pages;
> > > > > > >         int ret;
> > > > > > >
> > > > > > >         mutex_lock(&dev->mem_lock);
> > > > > > > @@ -1044,7 +1045,8 @@ static int vduse_dev_dereg_umem(struct vduse_dev *dev,
> > > > > > >         if (dev->umem->iova != iova || size != dev->domain->bounce_size)
> > > > > > >                 goto unlock;
> > > > > > >
> > > > > > > -       vduse_domain_remove_user_bounce_pages(dev->domain);
> > > > > > > +       pages = vduse_domain_alloc_pages_to_remove_bounce(dev->domain);
> > > > > > > +       vduse_domain_remove_user_bounce_pages(dev->domain, pages);
> > > > > > >         unpin_user_pages_dirty_lock(dev->umem->pages,
> > > > > > >                                     dev->umem->npages, true);
> > > > > > >         atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);
> > > > > >
> > > > > > We miss a kfree(pages); here?
> > > > > No.
> > > > > I've moved it into vduse_domain_remove_user_bounce_pages().
> > > >
> > > > OK, but it seems tricky, e.g. allocated by the caller but freed in the
> > > > callee. And I think I missed some important issues in the previous
> > > > review: the check of user_bounce_pages must be done under the
> > > > bounce_lock, otherwise it might race with umem_reg.
> > > >
> > > > So in the case of release(), we know the device is gone, so there's no
> > > > need to allocate pages that will be released soon. So we can pass NULL
> > > > as a hint and just assign bounce_page to NULL in
> > > > vduse_domain_remove_user_bounce_pages().
> > > >
> > > > And in the case of vduse_dev_dereg_umem(), we need to allocate the
> > > > pages without checking user_bounce_pages. So in
> > > > vduse_domain_remove_user_bounce_pages() we can free the allocated
> > > > pages as well as the array when we hit the following check
> > > >
> > > >         if (!domain->user_bounce_pages)
> > > >                 goto out;
> > > >
> > > > What do you think?
> > >
> > > I am not a vdpa guy, but changing the current logic is another patch.
> > > From the mm perspective, I can only address the __GFP_NOFAIL issue.
> > >
> > > I actually prefer you guys handle it directly :-) I'd rather report a BUG
> > > instead. TBH, I know nothing about vdpa.
> >
> > Fine, let me post a patch for this (no later than the end of this week).
> >
> Jason,
> Thank you very much. Also, Tetsuo reminded me that kmalloc_array() might be
> problematic if the count is too large:
>  pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
>

I think it's a side effect of __GFP_NOFAIL?

> You might want to consider using vmalloc_array() or kvmalloc_array() instead
> when you send a new version.

Will consider that.

Thanks


>
> > Thanks
> >
> > >
> > > >
> > > > Thanks
> > > >
> > > > >
> > > > > >
> > > > > > Thanks
> > > > > >
> > > > > > > --
> > > > > > > 2.34.1
> > > > > > >
> > > > > >
>
> Thanks
> Barry
>
Jason Wang Aug. 1, 2024, 2:37 a.m. UTC | #9
On Wed, Jul 31, 2024 at 6:21 PM Tetsuo Handa
<penguin-kernel@i-love.sakura.ne.jp> wrote:
>
> On 2024/07/31 14:05, Barry Song wrote:
> > Jason,
> > Thank you very much. Also, Tetsuo reminded me that kmalloc_array() might be
> > problematic if the count is too large:
> >  pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
>
> If "count" is guaranteed to be count <= 16, this might be tolerable.

It's not, unfortunately; the maximum bounce buffer size is:

#define VDUSE_MAX_BOUNCE_SIZE (1024 * 1024 * 1024)
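(With 4 KiB pages that is up to 1 GiB / 4 KiB = 262144 bounce pages, i.e. a
2 MiB pointer array at 8 bytes per entry, far beyond both the count <= 16
case above and the PAGE_SIZE * 2 kmalloc limit for __GFP_NOFAIL.)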

>
> Consider a situation where the current thread was chosen as a global OOM victim.
> Trying to allocate "count" pages using
>
>         for (i = 0; i < count; i++)
>                 pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
>
> is not good.

Right, I wonder if we need to add a shrinker to reclaim the pages that
belong to VDUSE bounce pages.
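A rough sketch of what such a shrinker could look like on recent kernels
(shrinker_alloc()/shrinker_register() are the post-v6.7 shrinker API; the
vduse_bounce_* callbacks, nr_idle_bounce_pages, and the drop-idle-pages
helper are purely hypothetical):

	static unsigned long vduse_bounce_count_objects(struct shrinker *shrinker,
							struct shrink_control *sc)
	{
		struct vduse_iova_domain *domain = shrinker->private_data;

		/* estimate of bounce pages that could be given back */
		return domain->nr_idle_bounce_pages;
	}

	static unsigned long vduse_bounce_scan_objects(struct shrinker *shrinker,
						       struct shrink_control *sc)
	{
		struct vduse_iova_domain *domain = shrinker->private_data;

		/* free up to sc->nr_to_scan idle pages, report how many went */
		return vduse_domain_drop_idle_bounce_pages(domain, sc->nr_to_scan);
	}

	/* at domain creation time */
	domain->shrinker = shrinker_alloc(0, "vduse-bounce");
	if (domain->shrinker) {
		domain->shrinker->count_objects = vduse_bounce_count_objects;
		domain->shrinker->scan_objects = vduse_bounce_scan_objects;
		domain->shrinker->seeks = DEFAULT_SEEKS;
		domain->shrinker->private_data = domain;
		shrinker_register(domain->shrinker);
	}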

>
> >
> > You might want to consider using vmalloc_array() or kvmalloc_array() instead
> > when you send a new version.
>
> There is a limitation at https://elixir.bootlin.com/linux/v6.11-rc1/source/mm/page_alloc.c#L3033
> that you must satisfy count <= PAGE_SIZE * 2 / sizeof(*pages) if you use __GFP_NOFAIL.
>
> But as already explained above, allocating 1024 pages (assuming PAGE_SIZE is 4096 and
> pointer size is 8) when current thread was chosen as an OOM victim is not recommended.
> You should implement proper error handling instead of using __GFP_NOFAIL if count can
> become large.

I think I need to consider a way to avoid __GFP_NOFAIL. An easy way is
not to free the kernel bounce pages; then we don't need to allocate
them again.
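As a sketch, that could mean parking the kernel page in the bounce map instead
of dropping it when user pages are installed (map->kernel_page is an invented
field, not part of the posted patch):

	/* vduse_domain_add_user_bounce_pages(): keep the kernel page around */
	map->kernel_page = map->bounce_page;
	map->bounce_page = pages[i];

	/* vduse_domain_remove_user_bounce_pages(): no allocation needed */
	if (map->orig_phys != INVALID_PHYS_ADDR)
		memcpy_from_page(page_address(map->kernel_page),
				 map->bounce_page, 0, PAGE_SIZE);
	put_page(map->bounce_page);		/* drop the user page */
	map->bounce_page = map->kernel_page;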

Thanks

>
>
Barry Song Aug. 5, 2024, 1:32 a.m. UTC | #10
On Thu, Aug 1, 2024 at 2:37 PM Jason Wang <jasowang@redhat.com> wrote:
>
> On Wed, Jul 31, 2024 at 6:21 PM Tetsuo Handa
> <penguin-kernel@i-love.sakura.ne.jp> wrote:
> >
> > On 2024/07/31 14:05, Barry Song wrote:
> > > Jason,
> > > Thank you very much. Also, Tetsuo reminded me that kmalloc_array() might be
> > > problematic if the count is too large:
> > >  pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
> >
> > If "count" is guaranteed to be count <= 16, this might be tolerable.
>
> It's not, unfortunately; the maximum bounce buffer size is:
>
> #define VDUSE_MAX_BOUNCE_SIZE (1024 * 1024 * 1024)
>
> >
> > Consider a situation where the current thread was chosen as a global OOM victim.
> > Trying to allocate "count" pages using
> >
> >         for (i = 0; i < count; i++)
> >                 pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
> >
> > is not good.
>
> Right, I wonder if we need to add a shrinker to reclaim the pages that
> belong to VDUSE bounce pages.
>
> >
> > >
> > > You might want to consider using vmalloc_array() or kvmalloc_array() instead
> > > when you send a new version.
> >
> > There is a limitation at https://elixir.bootlin.com/linux/v6.11-rc1/source/mm/page_alloc.c#L3033
> > that you must satisfy count <= PAGE_SIZE * 2 / sizeof(*pages) if you use __GFP_NOFAIL.
> >
> > But as already explained above, allocating 1024 pages (assuming PAGE_SIZE is 4096 and
> > pointer size is 8) when the current thread was chosen as an OOM victim is not recommended.
> > You should implement proper error handling instead of using __GFP_NOFAIL if count can
> > become large.
>
> I think I need to consider a way to avoid __GFP_NOFAIL. An easy way is
> not to free the kernel bounce pages; then we don't need to allocate
> them again.

Let's try to do a fix for this patch while we wait for your official patch
in mm.

I guess further optimization can be a separate patch later in the driver
tree :-)

>
> Thanks
>
> >
> >
>
Jason Wang Aug. 5, 2024, 8:19 a.m. UTC | #11
On Mon, Aug 5, 2024 at 9:32 AM Barry Song <21cnbao@gmail.com> wrote:
>
> On Thu, Aug 1, 2024 at 2:37 PM Jason Wang <jasowang@redhat.com> wrote:
> >
> > On Wed, Jul 31, 2024 at 6:21 PM Tetsuo Handa
> > <penguin-kernel@i-love.sakura.ne.jp> wrote:
> > >
> > > On 2024/07/31 14:05, Barry Song wrote:
> > > > Jason,
> > > > Thank you very much. Also, Tetsuo reminded me that kmalloc_array() might be
> > > > problematic if the count is too large:
> > > >  pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
> > >
> > > If "count" is guaranteed to be count <= 16, this might be tolerable.
> >
> > It's not, unfortunately; the maximum bounce buffer size is:
> >
> > #define VDUSE_MAX_BOUNCE_SIZE (1024 * 1024 * 1024)
> >
> > >
> > > Consider a situation where the current thread was chosen as a global OOM victim.
> > > Trying to allocate "count" pages using
> > >
> > >         for (i = 0; i < count; i++)
> > >                 pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
> > >
> > > is not good.
> >
> > Right, I wonder if we need to add a shrinker to reclaim the pages that
> > belong to VDUSE bounce pages.
> >
> > >
> > > >
> > > > You might want to consider using vmalloc_array() or kvmalloc_array() instead
> > > > when you send a new version.
> > >
> > > There is a limitation at https://elixir.bootlin.com/linux/v6.11-rc1/source/mm/page_alloc.c#L3033
> > > that you must satisfy count <= PAGE_SIZE * 2 / sizeof(*pages) if you use __GFP_NOFAIL.
> > >
> > > But as already explained above, allocating 1024 pages (assuming PAGE_SIZE is 4096 and
> > > pointer size is 8) when the current thread was chosen as an OOM victim is not recommended.
> > > You should implement proper error handling instead of using __GFP_NOFAIL if count can
> > > become large.
> >
> > I think I need to consider a way to avoid __GFP_NOFAIL. An easy way is
> > not to free the kernel bounce pages; then we don't need to allocate
> > them again.
>
> Let's try to do a fix for this patch while we wait for your official patch
> in mm.

Will post this soon. One note here is that I don't have a handy
userspace that uses userspace bounce pages (neither libvduse nor DPDK
does that).

I hope YongJi can review and test it.

Thanks

>
> I guess further optimization can be a separate patch later in the driver
> tree :-)
>
> >
> > Thanks
> >
> > >
> > >
> >
>
diff mbox series

Patch

diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 791d38d6284c..9318f059a8b5 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -283,7 +283,23 @@  int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
 	return ret;
 }
 
-void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
+struct page **vduse_domain_alloc_pages_to_remove_bounce(struct vduse_iova_domain *domain)
+{
+	struct page **pages;
+	unsigned long count, i;
+
+	if (!domain->user_bounce_pages)
+		return NULL;
+
+	count = domain->bounce_size >> PAGE_SHIFT;
+	pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
+	for (i = 0; i < count; i++)
+		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
+
+	return pages;
+}
+
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain, struct page **pages)
 {
 	struct vduse_bounce_map *map;
 	unsigned long i, count;
@@ -294,15 +310,16 @@  void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
 
 	count = domain->bounce_size >> PAGE_SHIFT;
 	for (i = 0; i < count; i++) {
-		struct page *page = NULL;
+		struct page *page = pages[i];
 
 		map = &domain->bounce_maps[i];
-		if (WARN_ON(!map->bounce_page))
+		if (WARN_ON(!map->bounce_page)) {
+			put_page(page);
 			continue;
+		}
 
 		/* Copy user page to kernel page if it's in use */
 		if (map->orig_phys != INVALID_PHYS_ADDR) {
-			page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
 			memcpy_from_page(page_address(page),
 					 map->bounce_page, 0, PAGE_SIZE);
 		}
@@ -310,6 +327,7 @@  void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
 		map->bounce_page = page;
 	}
 	domain->user_bounce_pages = false;
+	kfree(pages);
 out:
 	write_unlock(&domain->bounce_lock);
 }
@@ -543,10 +561,13 @@  static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
 static int vduse_domain_release(struct inode *inode, struct file *file)
 {
 	struct vduse_iova_domain *domain = file->private_data;
+	struct page **pages;
+
+	pages = vduse_domain_alloc_pages_to_remove_bounce(domain);
 
 	spin_lock(&domain->iotlb_lock);
 	vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
-	vduse_domain_remove_user_bounce_pages(domain);
+	vduse_domain_remove_user_bounce_pages(domain, pages);
 	vduse_domain_free_kernel_bounce_pages(domain);
 	spin_unlock(&domain->iotlb_lock);
 	put_iova_domain(&domain->stream_iovad);
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index f92f22a7267d..17efa5555b3f 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -74,7 +74,10 @@  void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
 int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
 				       struct page **pages, int count);
 
-void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain,
+					   struct page **pages);
+
+struct page **vduse_domain_alloc_pages_to_remove_bounce(struct vduse_iova_domain *domain);
 
 void vduse_domain_destroy(struct vduse_iova_domain *domain);
 
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 7ae99691efdf..5d8d5810df57 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -1030,6 +1030,7 @@  static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
 static int vduse_dev_dereg_umem(struct vduse_dev *dev,
 				u64 iova, u64 size)
 {
+	struct page **pages;
 	int ret;
 
 	mutex_lock(&dev->mem_lock);
@@ -1044,7 +1045,8 @@  static int vduse_dev_dereg_umem(struct vduse_dev *dev,
 	if (dev->umem->iova != iova || size != dev->domain->bounce_size)
 		goto unlock;
 
-	vduse_domain_remove_user_bounce_pages(dev->domain);
+	pages = vduse_domain_alloc_pages_to_remove_bounce(dev->domain);
+	vduse_domain_remove_user_bounce_pages(dev->domain, pages);
 	unpin_user_pages_dirty_lock(dev->umem->pages,
 				    dev->umem->npages, true);
 	atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);