diff mbox series

[RFC,1/5] vdpa: try to fix the potential crash due to misusing __GFP_NOFAIL

Message ID 20240724085544.299090-2-21cnbao@gmail.com (mailing list archive)
State New
Headers show
Series mm: clarify nofail memory allocation | expand

Commit Message

Barry Song July 24, 2024, 8:55 a.m. UTC
From: Barry Song <v-songbaohua@oppo.com>

mm doesn't support non-blockable __GFP_NOFAIL allocation, because
__GFP_NOFAIL without direct reclaim may just result in a busy loop
within non-sleepable contexts.

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                                struct alloc_context *ac)
{
        ...
        /*
         * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
         * we always retry
         */
        if (gfp_mask & __GFP_NOFAIL) {
                /*
                 * All existing users of the __GFP_NOFAIL are blockable, so warn
                 * of any new users that actually require GFP_NOWAIT
                 */
                if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
                        goto fail;
                ...
        }
        ...
fail:
        warn_alloc(gfp_mask, ac->nodemask,
                        "page allocation failure: order:%u", order);
got_pg:
        return page;
}

Let's move the memory allocation out of the atomic context and use
the normal sleepable context to get pages.

[RFC]: This has only been compile-tested; I'd prefer that the VDPA
maintainers handle it.

Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: "Eugenio Pérez" <eperezma@redhat.com>
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
---
 drivers/vdpa/vdpa_user/iova_domain.c | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

Comments

Michal Hocko July 24, 2024, 12:26 p.m. UTC | #1
On Wed 24-07-24 20:55:40, Barry Song wrote:
> From: Barry Song <v-songbaohua@oppo.com>
> 
> mm doesn't support non-blockable __GFP_NOFAIL allocation. Because
> __GFP_NOFAIL without direct reclamation may just result in a busy
> loop within non-sleepable contexts.
> 
> static inline struct page *
> __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
>                                                 struct alloc_context *ac)
> {
>         ...
>         /*
>          * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
>          * we always retry
>          */
>         if (gfp_mask & __GFP_NOFAIL) {
>                 /*
>                  * All existing users of the __GFP_NOFAIL are blockable, so warn
>                  * of any new users that actually require GFP_NOWAIT
>                  */
>                 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
>                         goto fail;
>                 ...
>         }
>         ...
> fail:
>         warn_alloc(gfp_mask, ac->nodemask,
>                         "page allocation failure: order:%u", order);
> got_pg:
>         return page;
> }
> 
> Let's move the memory allocation out of the atomic context and use
> the normal sleepable context to get pages.
> 
> [RFC]: This has only been compile-tested; I'd prefer if the VDPA maintainers
> handles it.
> 
> Cc: "Michael S. Tsirkin" <mst@redhat.com>
> Cc: Jason Wang <jasowang@redhat.com>
> Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> Cc: "Eugenio Pérez" <eperezma@redhat.com>
> Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> ---
>  drivers/vdpa/vdpa_user/iova_domain.c | 24 ++++++++++++++++++++----
>  1 file changed, 20 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
> index 791d38d6284c..eff700e5f7a2 100644
> --- a/drivers/vdpa/vdpa_user/iova_domain.c
> +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> @@ -287,28 +287,44 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
>  {
>  	struct vduse_bounce_map *map;
>  	unsigned long i, count;
> +	struct page **pages = NULL;
>  
>  	write_lock(&domain->bounce_lock);
>  	if (!domain->user_bounce_pages)
>  		goto out;
> -
>  	count = domain->bounce_size >> PAGE_SHIFT;
> +	write_unlock(&domain->bounce_lock);
> +
> +	pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
> +	for (i = 0; i < count; i++)
> +		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);

AFAICS vduse_domain_release calls this function with
spin_lock(&domain->iotlb_lock) so dropping &domain->bounce_lock is not
sufficient.
Barry Song July 24, 2024, 10:50 p.m. UTC | #2
On Thu, Jul 25, 2024 at 12:27 AM Michal Hocko <mhocko@suse.com> wrote:
>
> On Wed 24-07-24 20:55:40, Barry Song wrote:
> > From: Barry Song <v-songbaohua@oppo.com>
> >
> > mm doesn't support non-blockable __GFP_NOFAIL allocation. Because
> > __GFP_NOFAIL without direct reclamation may just result in a busy
> > loop within non-sleepable contexts.
> >
> > static inline struct page *
> > __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
> >                                                 struct alloc_context *ac)
> > {
> >         ...
> >         /*
> >          * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
> >          * we always retry
> >          */
> >         if (gfp_mask & __GFP_NOFAIL) {
> >                 /*
> >                  * All existing users of the __GFP_NOFAIL are blockable, so warn
> >                  * of any new users that actually require GFP_NOWAIT
> >                  */
> >                 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
> >                         goto fail;
> >                 ...
> >         }
> >         ...
> > fail:
> >         warn_alloc(gfp_mask, ac->nodemask,
> >                         "page allocation failure: order:%u", order);
> > got_pg:
> >         return page;
> > }
> >
> > Let's move the memory allocation out of the atomic context and use
> > the normal sleepable context to get pages.
> >
> > [RFC]: This has only been compile-tested; I'd prefer if the VDPA maintainers
> > handles it.
> >
> > Cc: "Michael S. Tsirkin" <mst@redhat.com>
> > Cc: Jason Wang <jasowang@redhat.com>
> > Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > Cc: "Eugenio Pérez" <eperezma@redhat.com>
> > Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> > Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> > ---
> >  drivers/vdpa/vdpa_user/iova_domain.c | 24 ++++++++++++++++++++----
> >  1 file changed, 20 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
> > index 791d38d6284c..eff700e5f7a2 100644
> > --- a/drivers/vdpa/vdpa_user/iova_domain.c
> > +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> > @@ -287,28 +287,44 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
> >  {
> >       struct vduse_bounce_map *map;
> >       unsigned long i, count;
> > +     struct page **pages = NULL;
> >
> >       write_lock(&domain->bounce_lock);
> >       if (!domain->user_bounce_pages)
> >               goto out;
> > -
> >       count = domain->bounce_size >> PAGE_SHIFT;
> > +     write_unlock(&domain->bounce_lock);
> > +
> > +     pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
> > +     for (i = 0; i < count; i++)
> > +             pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
>
> AFAICS vduse_domain_release calls this function with
> spin_lock(&domain->iotlb_lock) so dropping &domain->bounce_lock is not
> sufficient.

Yes, this is true:

static int vduse_domain_release(struct inode *inode, struct file *file)
{
        struct vduse_iova_domain *domain = file->private_data;

        spin_lock(&domain->iotlb_lock);
        vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
        vduse_domain_remove_user_bounce_pages(domain);
        vduse_domain_free_kernel_bounce_pages(domain);
        spin_unlock(&domain->iotlb_lock);
        put_iova_domain(&domain->stream_iovad);
        put_iova_domain(&domain->consistent_iovad);
        vhost_iotlb_free(domain->iotlb);
        vfree(domain->bounce_maps);
        kfree(domain);

        return 0;
}

This is quite a pain. I admit I don't know this driver well, and I don't
think it's safe to release two locks and then reacquire them. The situation
is rather complex. Therefore, I would prefer that the VDPA maintainers
take the lead in implementing a proper fix.
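
Concretely, the pattern I'd be worried about is something like the
following sketch (illustrative only, not actual driver code): once both
locks are dropped around the allocation, everything checked beforehand
has to be revalidated after relocking.

	/* inside vduse_domain_remove_user_bounce_pages() */
	write_unlock(&domain->bounce_lock);
	spin_unlock(&domain->iotlb_lock);  /* the caller's lock would have to be dropped too */

	/* sleepable window: another path may change the domain state here */
	pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);

	spin_lock(&domain->iotlb_lock);
	write_lock(&domain->bounce_lock);
	/* e.g. domain->user_bounce_pages must be rechecked before proceeding */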

>
> --
> Michal Hocko
> SUSE Labs

Thanks
Barry
Michal Hocko July 25, 2024, 6:08 a.m. UTC | #3
On Thu 25-07-24 10:50:45, Barry Song wrote:
> On Thu, Jul 25, 2024 at 12:27 AM Michal Hocko <mhocko@suse.com> wrote:
> >
> > On Wed 24-07-24 20:55:40, Barry Song wrote:
[...]
> > > diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
> > > index 791d38d6284c..eff700e5f7a2 100644
> > > --- a/drivers/vdpa/vdpa_user/iova_domain.c
> > > +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> > > @@ -287,28 +287,44 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
> > >  {
> > >       struct vduse_bounce_map *map;
> > >       unsigned long i, count;
> > > +     struct page **pages = NULL;
> > >
> > >       write_lock(&domain->bounce_lock);
> > >       if (!domain->user_bounce_pages)
> > >               goto out;
> > > -
> > >       count = domain->bounce_size >> PAGE_SHIFT;
> > > +     write_unlock(&domain->bounce_lock);
> > > +
> > > +     pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
> > > +     for (i = 0; i < count; i++)
> > > +             pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
> >
> > AFAICS vduse_domain_release calls this function with
> > spin_lock(&domain->iotlb_lock) so dropping &domain->bounce_lock is not
> > sufficient.
> 
> yes. this is true:
> 
> static int vduse_domain_release(struct inode *inode, struct file *file)
> {
>         struct vduse_iova_domain *domain = file->private_data;
> 
>         spin_lock(&domain->iotlb_lock);
>         vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
>         vduse_domain_remove_user_bounce_pages(domain);
>         vduse_domain_free_kernel_bounce_pages(domain);
>         spin_unlock(&domain->iotlb_lock);
>         put_iova_domain(&domain->stream_iovad);
>         put_iova_domain(&domain->consistent_iovad);
>         vhost_iotlb_free(domain->iotlb);
>         vfree(domain->bounce_maps);
>         kfree(domain);
> 
>         return 0;
> }
> 
> This is quite a pain. I admit I don't have knowledge of this driver, and I don't
> think it's safe to release two locks and then reacquire them. The situation is
> rather complex. Therefore, I would prefer if the VDPA maintainers could
> take the lead in implementing a proper fix.

Would it be possible to move all that work to a deferred context?
Barry Song July 25, 2024, 7 a.m. UTC | #4
On Thu, Jul 25, 2024 at 6:08 PM Michal Hocko <mhocko@suse.com> wrote:
>
> On Thu 25-07-24 10:50:45, Barry Song wrote:
> > On Thu, Jul 25, 2024 at 12:27 AM Michal Hocko <mhocko@suse.com> wrote:
> > >
> > > On Wed 24-07-24 20:55:40, Barry Song wrote:
> [...]
> > > > diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
> > > > index 791d38d6284c..eff700e5f7a2 100644
> > > > --- a/drivers/vdpa/vdpa_user/iova_domain.c
> > > > +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> > > > @@ -287,28 +287,44 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
> > > >  {
> > > >       struct vduse_bounce_map *map;
> > > >       unsigned long i, count;
> > > > +     struct page **pages = NULL;
> > > >
> > > >       write_lock(&domain->bounce_lock);
> > > >       if (!domain->user_bounce_pages)
> > > >               goto out;
> > > > -
> > > >       count = domain->bounce_size >> PAGE_SHIFT;
> > > > +     write_unlock(&domain->bounce_lock);
> > > > +
> > > > +     pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
> > > > +     for (i = 0; i < count; i++)
> > > > +             pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
> > >
> > > AFAICS vduse_domain_release calls this function with
> > > spin_lock(&domain->iotlb_lock) so dropping &domain->bounce_lock is not
> > > sufficient.
> >
> > yes. this is true:
> >
> > static int vduse_domain_release(struct inode *inode, struct file *file)
> > {
> >         struct vduse_iova_domain *domain = file->private_data;
> >
> >         spin_lock(&domain->iotlb_lock);
> >         vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
> >         vduse_domain_remove_user_bounce_pages(domain);
> >         vduse_domain_free_kernel_bounce_pages(domain);
> >         spin_unlock(&domain->iotlb_lock);
> >         put_iova_domain(&domain->stream_iovad);
> >         put_iova_domain(&domain->consistent_iovad);
> >         vhost_iotlb_free(domain->iotlb);
> >         vfree(domain->bounce_maps);
> >         kfree(domain);
> >
> >         return 0;
> > }
> >
> > This is quite a pain. I admit I don't have knowledge of this driver, and I don't
> > think it's safe to release two locks and then reacquire them. The situation is
> > rather complex. Therefore, I would prefer if the VDPA maintainers could
> > take the lead in implementing a proper fix.
>
> Would it be possible to move all that work to a deferred context?

My understanding is that we need to deal with both the iotlb_lock and
the bounce_lock to implement the correct changes. As long as we still
need to acquire these two locks in a deferred context, there doesn't
seem to be any difference.
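
For example, a deferred version would be something like the sketch below
(vduse_domain_release_work and the release_work field are invented names,
assuming a work_struct embedded in struct vduse_iova_domain); the worker
runs in a sleepable context, but it still takes the same spinlocks:

	static void vduse_domain_release_work(struct work_struct *work)
	{
		struct vduse_iova_domain *domain =
			container_of(work, struct vduse_iova_domain, release_work);

		/* sleepable here, but remove_user_bounce_pages() would still
		 * allocate under the spinlock, so nothing is gained */
		spin_lock(&domain->iotlb_lock);
		vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
		vduse_domain_remove_user_bounce_pages(domain);
		vduse_domain_free_kernel_bounce_pages(domain);
		spin_unlock(&domain->iotlb_lock);
		...
	}

	static int vduse_domain_release(struct inode *inode, struct file *file)
	{
		struct vduse_iova_domain *domain = file->private_data;

		INIT_WORK(&domain->release_work, vduse_domain_release_work);
		schedule_work(&domain->release_work);
		return 0;
	}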

I can do the memory pre-allocation before spin_lock(&domain->iotlb_lock),
but I don't know whether the "count" can change after I make the
preallocation.

diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 791d38d6284c..7ec87ef33d42 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -544,9 +544,12 @@ static int vduse_domain_release(struct inode *inode, struct file *file)
 {
        struct vduse_iova_domain *domain = file->private_data;

+	struct page **pages;
+	unsigned long count;
+
+	spin_lock(&domain->iotlb_lock); /* maybe also bounce_lock? */
+	count = domain->bounce_size >> PAGE_SHIFT;
+	spin_unlock(&domain->iotlb_lock);
+
+	preallocate_count_pages(pages, count); /* pseudocode helper */
+
...
        spin_lock(&domain->iotlb_lock);
        vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
-       vduse_domain_remove_user_bounce_pages(domain);
+       vduse_domain_remove_user_bounce_pages(domain, pages);
        vduse_domain_free_kernel_bounce_pages(domain);
        spin_unlock(&domain->iotlb_lock);
        put_iova_domain(&domain->stream_iovad);


> --
> Michal Hocko
> SUSE Labs
Jason Wang July 29, 2024, 3:42 a.m. UTC | #5
On Thu, Jul 25, 2024 at 3:00 PM Barry Song <21cnbao@gmail.com> wrote:
>
> On Thu, Jul 25, 2024 at 6:08 PM Michal Hocko <mhocko@suse.com> wrote:
> >
> > On Thu 25-07-24 10:50:45, Barry Song wrote:
> > > On Thu, Jul 25, 2024 at 12:27 AM Michal Hocko <mhocko@suse.com> wrote:
> > > >
> > > > On Wed 24-07-24 20:55:40, Barry Song wrote:
> > [...]
> > > > > diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
> > > > > index 791d38d6284c..eff700e5f7a2 100644
> > > > > --- a/drivers/vdpa/vdpa_user/iova_domain.c
> > > > > +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> > > > > @@ -287,28 +287,44 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
> > > > >  {
> > > > >       struct vduse_bounce_map *map;
> > > > >       unsigned long i, count;
> > > > > +     struct page **pages = NULL;
> > > > >
> > > > >       write_lock(&domain->bounce_lock);
> > > > >       if (!domain->user_bounce_pages)
> > > > >               goto out;
> > > > > -
> > > > >       count = domain->bounce_size >> PAGE_SHIFT;
> > > > > +     write_unlock(&domain->bounce_lock);
> > > > > +
> > > > > +     pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
> > > > > +     for (i = 0; i < count; i++)
> > > > > +             pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
> > > >
> > > > AFAICS vduse_domain_release calls this function with
> > > > spin_lock(&domain->iotlb_lock) so dropping &domain->bounce_lock is not
> > > > sufficient.
> > >
> > > yes. this is true:
> > >
> > > static int vduse_domain_release(struct inode *inode, struct file *file)
> > > {
> > >         struct vduse_iova_domain *domain = file->private_data;
> > >
> > >         spin_lock(&domain->iotlb_lock);
> > >         vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
> > >         vduse_domain_remove_user_bounce_pages(domain);
> > >         vduse_domain_free_kernel_bounce_pages(domain);
> > >         spin_unlock(&domain->iotlb_lock);
> > >         put_iova_domain(&domain->stream_iovad);
> > >         put_iova_domain(&domain->consistent_iovad);
> > >         vhost_iotlb_free(domain->iotlb);
> > >         vfree(domain->bounce_maps);
> > >         kfree(domain);
> > >
> > >         return 0;
> > > }
> > >
> > > This is quite a pain. I admit I don't have knowledge of this driver, and I don't
> > > think it's safe to release two locks and then reacquire them. The situation is
> > > rather complex. Therefore, I would prefer if the VDPA maintainers could
> > > take the lead in implementing a proper fix.
> >
> > Would it be possible to move all that work to a deferred context?
>
> My understanding is that we need to be aware of both the iotlb_lock and
> bounce_lock to implement the correct changes. As long as we still need
> to acquire these two locks in a deferred context, there doesn't seem to
> be any difference.
>
> I can do the memory pre-allocation before spin_lock(&domain->iotlb_lock),
> but I have no knowledge whether the "count" will change after I make
> the preallocation.
>
> diff --git a/drivers/vdpa/vdpa_user/iova_domain.c
> b/drivers/vdpa/vdpa_user/iova_domain.c
> index 791d38d6284c..7ec87ef33d42 100644
> --- a/drivers/vdpa/vdpa_user/iova_domain.c
> +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> @@ -544,9 +544,12 @@ static int vduse_domain_release(struct inode
> *inode, struct file *file)
>  {
>         struct vduse_iova_domain *domain = file->private_data;
>
> +      struct page **pages;
> +      spin_lock(&domain->iotlb_lock); maybe also + bounce_lock?
> +      count = domain->bounce_size >> PAGE_SHIFT;
> +      spin_unlock(&domain->iotlb_lock);

We probably don't need any lock here as bounce_size won't change after the domain is created.

> +
> +       preallocate_count_pages(pages, count);
> +
> ....
>         spin_lock(&domain->iotlb_lock);
>         vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
> -       vduse_domain_remove_user_bounce_pages(domain);
> +       vduse_domain_remove_user_bounce_pages(domain, pages);
>         vduse_domain_free_kernel_bounce_pages(domain);
>         spin_unlock(&domain->iotlb_lock);
>         put_iova_domain(&domain->stream_iovad);

This seems to work.

Thanks

>
>
> > --
> > Michal Hocko
> > SUSE Labs
>
Barry Song July 29, 2024, 6:05 a.m. UTC | #6
On Mon, Jul 29, 2024 at 3:42 PM Jason Wang <jasowang@redhat.com> wrote:
>
> On Thu, Jul 25, 2024 at 3:00 PM Barry Song <21cnbao@gmail.com> wrote:
> >
> > On Thu, Jul 25, 2024 at 6:08 PM Michal Hocko <mhocko@suse.com> wrote:
> > >
> > > On Thu 25-07-24 10:50:45, Barry Song wrote:
> > > > On Thu, Jul 25, 2024 at 12:27 AM Michal Hocko <mhocko@suse.com> wrote:
> > > > >
> > > > > On Wed 24-07-24 20:55:40, Barry Song wrote:
> > > [...]
> > > > > > diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
> > > > > > index 791d38d6284c..eff700e5f7a2 100644
> > > > > > --- a/drivers/vdpa/vdpa_user/iova_domain.c
> > > > > > +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> > > > > > @@ -287,28 +287,44 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
> > > > > >  {
> > > > > >       struct vduse_bounce_map *map;
> > > > > >       unsigned long i, count;
> > > > > > +     struct page **pages = NULL;
> > > > > >
> > > > > >       write_lock(&domain->bounce_lock);
> > > > > >       if (!domain->user_bounce_pages)
> > > > > >               goto out;
> > > > > > -
> > > > > >       count = domain->bounce_size >> PAGE_SHIFT;
> > > > > > +     write_unlock(&domain->bounce_lock);
> > > > > > +
> > > > > > +     pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
> > > > > > +     for (i = 0; i < count; i++)
> > > > > > +             pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
> > > > >
> > > > > AFAICS vduse_domain_release calls this function with
> > > > > spin_lock(&domain->iotlb_lock) so dropping &domain->bounce_lock is not
> > > > > sufficient.
> > > >
> > > > yes. this is true:
> > > >
> > > > static int vduse_domain_release(struct inode *inode, struct file *file)
> > > > {
> > > >         struct vduse_iova_domain *domain = file->private_data;
> > > >
> > > >         spin_lock(&domain->iotlb_lock);
> > > >         vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
> > > >         vduse_domain_remove_user_bounce_pages(domain);
> > > >         vduse_domain_free_kernel_bounce_pages(domain);
> > > >         spin_unlock(&domain->iotlb_lock);
> > > >         put_iova_domain(&domain->stream_iovad);
> > > >         put_iova_domain(&domain->consistent_iovad);
> > > >         vhost_iotlb_free(domain->iotlb);
> > > >         vfree(domain->bounce_maps);
> > > >         kfree(domain);
> > > >
> > > >         return 0;
> > > > }
> > > >
> > > > This is quite a pain. I admit I don't have knowledge of this driver, and I don't
> > > > think it's safe to release two locks and then reacquire them. The situation is
> > > > rather complex. Therefore, I would prefer if the VDPA maintainers could
> > > > take the lead in implementing a proper fix.
> > >
> > > Would it be possible to move all that work to a deferred context?
> >
> > My understanding is that we need to be aware of both the iotlb_lock and
> > bounce_lock to implement the correct changes. As long as we still need
> > to acquire these two locks in a deferred context, there doesn't seem to
> > be any difference.
> >
> > I can do the memory pre-allocation before spin_lock(&domain->iotlb_lock),
> > but I have no knowledge whether the "count" will change after I make
> > the preallocation.
> >
> > diff --git a/drivers/vdpa/vdpa_user/iova_domain.c
> > b/drivers/vdpa/vdpa_user/iova_domain.c
> > index 791d38d6284c..7ec87ef33d42 100644
> > --- a/drivers/vdpa/vdpa_user/iova_domain.c
> > +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> > @@ -544,9 +544,12 @@ static int vduse_domain_release(struct inode
> > *inode, struct file *file)
> >  {
> >         struct vduse_iova_domain *domain = file->private_data;
> >
> > +      struct page **pages;
> > +      spin_lock(&domain->iotlb_lock); maybe also + bounce_lock?
> > +      count = domain->bounce_size >> PAGE_SHIFT;
> > +      spin_unlock(&domain->iotlb_lock);
>
> We probably don't need any lock here as bounce_size won't be changed .
>
> > +
> > +       preallocate_count_pages(pages, count);
> > +
> > ....
> >         spin_lock(&domain->iotlb_lock);
> >         vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
> > -       vduse_domain_remove_user_bounce_pages(domain);
> > +       vduse_domain_remove_user_bounce_pages(domain, pages);
> >         vduse_domain_free_kernel_bounce_pages(domain);
> >         spin_unlock(&domain->iotlb_lock);
> >         put_iova_domain(&domain->stream_iovad);
>
> This seems to work.

Thanks, Jason. I personally have no knowledge of vDPA. Could you please help
review and test the patch below?

From 1f3cae091159bfcaffdb4a999a4a8e37db2eacf1 Mon Sep 17 00:00:00 2001
From: Barry Song <v-songbaohua@oppo.com>
Date: Wed, 24 Jul 2024 20:55:40 +1200
Subject: [PATCH RFC v2] vdpa: try to fix the potential crash due to misusing
 __GFP_NOFAIL
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

mm doesn't support non-blockable __GFP_NOFAIL allocation, because
__GFP_NOFAIL without direct reclaim may just result in a busy loop
within non-sleepable contexts.

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
                                                struct alloc_context *ac)
{
        ...
        /*
         * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
         * we always retry
         */
        if (gfp_mask & __GFP_NOFAIL) {
                /*
                 * All existing users of the __GFP_NOFAIL are blockable, so warn
                 * of any new users that actually require GFP_NOWAIT
                 */
                if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
                        goto fail;
                ...
        }
        ...
fail:
        warn_alloc(gfp_mask, ac->nodemask,
                        "page allocation failure: order:%u", order);
got_pg:
        return page;
}

Let's move the memory allocation out of the atomic context and use
the normal sleepable context to get pages.

[RFC]: This has only been compile-tested; I'd prefer that the VDPA
maintainers handle it.

Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Jason Wang <jasowang@redhat.com>
Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
Cc: "Eugenio Pérez" <eperezma@redhat.com>
Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
Signed-off-by: Barry Song <v-songbaohua@oppo.com>
---
 drivers/vdpa/vdpa_user/iova_domain.c | 21 ++++++++++++++++-----
 drivers/vdpa/vdpa_user/iova_domain.h |  3 ++-
 drivers/vdpa/vdpa_user/vduse_dev.c   | 13 ++++++++++++-
 3 files changed, 30 insertions(+), 7 deletions(-)

diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 791d38d6284c..014809ac2b7c 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -283,7 +283,7 @@ int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
 	return ret;
 }
 
-void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain, struct page **pages)
 {
 	struct vduse_bounce_map *map;
 	unsigned long i, count;
@@ -294,15 +294,16 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
 
 	count = domain->bounce_size >> PAGE_SHIFT;
 	for (i = 0; i < count; i++) {
-		struct page *page = NULL;
+		struct page *page = pages[i];
 
 		map = &domain->bounce_maps[i];
-		if (WARN_ON(!map->bounce_page))
+		if (WARN_ON(!map->bounce_page)) {
+			put_page(page);
 			continue;
+		}
 
 		/* Copy user page to kernel page if it's in use */
 		if (map->orig_phys != INVALID_PHYS_ADDR) {
-			page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
 			memcpy_from_page(page_address(page),
 					 map->bounce_page, 0, PAGE_SIZE);
 		}
@@ -543,10 +544,19 @@ static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
 static int vduse_domain_release(struct inode *inode, struct file *file)
 {
 	struct vduse_iova_domain *domain = file->private_data;
+	struct page **pages = NULL;
+	unsigned long count, i;
+
+	if (domain->user_bounce_pages) {
+		count = domain->bounce_size >> PAGE_SHIFT;
+		pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
+		for (i = 0; i < count; i++)
+			pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
+	}
 
 	spin_lock(&domain->iotlb_lock);
 	vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
-	vduse_domain_remove_user_bounce_pages(domain);
+	vduse_domain_remove_user_bounce_pages(domain, pages);
 	vduse_domain_free_kernel_bounce_pages(domain);
 	spin_unlock(&domain->iotlb_lock);
 	put_iova_domain(&domain->stream_iovad);
@@ -554,6 +564,7 @@ static int vduse_domain_release(struct inode *inode, struct file *file)
 	vhost_iotlb_free(domain->iotlb);
 	vfree(domain->bounce_maps);
 	kfree(domain);
+	kfree(pages);
 
 	return 0;
 }
diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
index f92f22a7267d..db0b793d86db 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.h
+++ b/drivers/vdpa/vdpa_user/iova_domain.h
@@ -74,7 +74,8 @@ void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
 int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
 				       struct page **pages, int count);
 
-void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
+void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain,
+					   struct page **pages);
 
 void vduse_domain_destroy(struct vduse_iova_domain *domain);
 
diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
index 7ae99691efdf..df7c1b6f1350 100644
--- a/drivers/vdpa/vdpa_user/vduse_dev.c
+++ b/drivers/vdpa/vdpa_user/vduse_dev.c
@@ -1030,6 +1030,8 @@ static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
 static int vduse_dev_dereg_umem(struct vduse_dev *dev,
 				u64 iova, u64 size)
 {
+	struct page **pages = NULL;
+	unsigned long count, i;
 	int ret;
 
 	mutex_lock(&dev->mem_lock);
@@ -1044,13 +1046,22 @@ static int vduse_dev_dereg_umem(struct vduse_dev *dev,
 	if (dev->umem->iova != iova || size != dev->domain->bounce_size)
 		goto unlock;
 
-	vduse_domain_remove_user_bounce_pages(dev->domain);
+	if (dev->domain->user_bounce_pages) {
+		count = dev->domain->bounce_size >> PAGE_SHIFT;
+		pages = kmalloc_array(count, sizeof(*pages),
+				      GFP_KERNEL | __GFP_NOFAIL);
+		for (i = 0; i < count; i++)
+			pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
+	}
+
+	vduse_domain_remove_user_bounce_pages(dev->domain, pages);
 	unpin_user_pages_dirty_lock(dev->umem->pages,
 				    dev->umem->npages, true);
 	atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);
 	mmdrop(dev->umem->mm);
 	vfree(dev->umem->pages);
 	kfree(dev->umem);
+	kfree(pages);
 	dev->umem = NULL;
 	ret = 0;
 unlock:
Barry Song July 30, 2024, 3:08 a.m. UTC | #7
On Tue, Jul 30, 2024 at 10:49 AM Jason Wang <jasowang@redhat.com> wrote:
>
> On Mon, Jul 29, 2024 at 2:05 PM Barry Song <21cnbao@gmail.com> wrote:
> >
> > On Mon, Jul 29, 2024 at 3:42 PM Jason Wang <jasowang@redhat.com> wrote:
> > >
> > > On Thu, Jul 25, 2024 at 3:00 PM Barry Song <21cnbao@gmail.com> wrote:
> > > >
> > > > On Thu, Jul 25, 2024 at 6:08 PM Michal Hocko <mhocko@suse.com> wrote:
> > > > >
> > > > > On Thu 25-07-24 10:50:45, Barry Song wrote:
> > > > > > On Thu, Jul 25, 2024 at 12:27 AM Michal Hocko <mhocko@suse.com> wrote:
> > > > > > >
> > > > > > > On Wed 24-07-24 20:55:40, Barry Song wrote:
> > > > > [...]
> > > > > > > > diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
> > > > > > > > index 791d38d6284c..eff700e5f7a2 100644
> > > > > > > > --- a/drivers/vdpa/vdpa_user/iova_domain.c
> > > > > > > > +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> > > > > > > > @@ -287,28 +287,44 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
> > > > > > > >  {
> > > > > > > >       struct vduse_bounce_map *map;
> > > > > > > >       unsigned long i, count;
> > > > > > > > +     struct page **pages = NULL;
> > > > > > > >
> > > > > > > >       write_lock(&domain->bounce_lock);
> > > > > > > >       if (!domain->user_bounce_pages)
> > > > > > > >               goto out;
> > > > > > > > -
> > > > > > > >       count = domain->bounce_size >> PAGE_SHIFT;
> > > > > > > > +     write_unlock(&domain->bounce_lock);
> > > > > > > > +
> > > > > > > > +     pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
> > > > > > > > +     for (i = 0; i < count; i++)
> > > > > > > > +             pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
> > > > > > >
> > > > > > > AFAICS vduse_domain_release calls this function with
> > > > > > > spin_lock(&domain->iotlb_lock) so dropping &domain->bounce_lock is not
> > > > > > > sufficient.
> > > > > >
> > > > > > yes. this is true:
> > > > > >
> > > > > > static int vduse_domain_release(struct inode *inode, struct file *file)
> > > > > > {
> > > > > >         struct vduse_iova_domain *domain = file->private_data;
> > > > > >
> > > > > >         spin_lock(&domain->iotlb_lock);
> > > > > >         vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
> > > > > >         vduse_domain_remove_user_bounce_pages(domain);
> > > > > >         vduse_domain_free_kernel_bounce_pages(domain);
> > > > > >         spin_unlock(&domain->iotlb_lock);
> > > > > >         put_iova_domain(&domain->stream_iovad);
> > > > > >         put_iova_domain(&domain->consistent_iovad);
> > > > > >         vhost_iotlb_free(domain->iotlb);
> > > > > >         vfree(domain->bounce_maps);
> > > > > >         kfree(domain);
> > > > > >
> > > > > >         return 0;
> > > > > > }
> > > > > >
> > > > > > This is quite a pain. I admit I don't have knowledge of this driver, and I don't
> > > > > > think it's safe to release two locks and then reacquire them. The situation is
> > > > > > rather complex. Therefore, I would prefer if the VDPA maintainers could
> > > > > > take the lead in implementing a proper fix.
> > > > >
> > > > > Would it be possible to move all that work to a deferred context?
> > > >
> > > > My understanding is that we need to be aware of both the iotlb_lock and
> > > > bounce_lock to implement the correct changes. As long as we still need
> > > > to acquire these two locks in a deferred context, there doesn't seem to
> > > > be any difference.
> > > >
> > > > I can do the memory pre-allocation before spin_lock(&domain->iotlb_lock),
> > > > but I have no knowledge whether the "count" will change after I make
> > > > the preallocation.
> > > >
> > > > diff --git a/drivers/vdpa/vdpa_user/iova_domain.c
> > > > b/drivers/vdpa/vdpa_user/iova_domain.c
> > > > index 791d38d6284c..7ec87ef33d42 100644
> > > > --- a/drivers/vdpa/vdpa_user/iova_domain.c
> > > > +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> > > > @@ -544,9 +544,12 @@ static int vduse_domain_release(struct inode
> > > > *inode, struct file *file)
> > > >  {
> > > >         struct vduse_iova_domain *domain = file->private_data;
> > > >
> > > > +      struct page **pages;
> > > > +      spin_lock(&domain->iotlb_lock); maybe also + bounce_lock?
> > > > +      count = domain->bounce_size >> PAGE_SHIFT;
> > > > +      spin_unlock(&domain->iotlb_lock);
> > >
> > > We probably don't need any lock here as bounce_size won't be changed .
> > >
> > > > +
> > > > +       preallocate_count_pages(pages, count);
> > > > +
> > > > ....
> > > >         spin_lock(&domain->iotlb_lock);
> > > >         vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
> > > > -       vduse_domain_remove_user_bounce_pages(domain);
> > > > +       vduse_domain_remove_user_bounce_pages(domain, pages);
> > > >         vduse_domain_free_kernel_bounce_pages(domain);
> > > >         spin_unlock(&domain->iotlb_lock);
> > > >         put_iova_domain(&domain->stream_iovad);
> > >
> > > This seems to work.
> >
> > Thanks, Jason. I personally have no knowledge of vDPA. Could you please help
> > review and test the patch below?
> >
> > From 1f3cae091159bfcaffdb4a999a4a8e37db2eacf1 Mon Sep 17 00:00:00 2001
> > From: Barry Song <v-songbaohua@oppo.com>
> > Date: Wed, 24 Jul 2024 20:55:40 +1200
> > Subject: [PATCH RFC v2] vpda: try to fix the potential crash due to misusing
> >  __GFP_NOFAIL
> > MIME-Version: 1.0
> > Content-Type: text/plain; charset=UTF-8
> > Content-Transfer-Encoding: 8bit
> >
> > mm doesn't support non-blockable __GFP_NOFAIL allocation. Because
> > __GFP_NOFAIL without direct reclamation may just result in a busy
> > loop within non-sleepable contexts.
> >
> > static inline struct page *
> > __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
> >                                                 struct alloc_context *ac)
> > {
> >         ...
> >         /*
> >          * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
> >          * we always retry
> >          */
> >         if (gfp_mask & __GFP_NOFAIL) {
> >                 /*
> >                  * All existing users of the __GFP_NOFAIL are blockable, so warn
> >                  * of any new users that actually require GFP_NOWAIT
> >                  */
> >                 if (WARN_ON_ONCE_GFP(!can_direct_reclaim, gfp_mask))
> >                         goto fail;
> >                 ...
> >         }
> >         ...
> > fail:
> >         warn_alloc(gfp_mask, ac->nodemask,
> >                         "page allocation failure: order:%u", order);
> > got_pg:
> >         return page;
> > }
> >
> > Let's move the memory allocation out of the atomic context and use
> > the normal sleepable context to get pages.
> >
> > [RFC]: This has only been compile-tested; I'd prefer if the VDPA maintainers
> > handles it.
> >
> > Cc: "Michael S. Tsirkin" <mst@redhat.com>
> > Cc: Jason Wang <jasowang@redhat.com>
> > Cc: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
> > Cc: "Eugenio Pérez" <eperezma@redhat.com>
> > Cc: Maxime Coquelin <maxime.coquelin@redhat.com>
> > Signed-off-by: Barry Song <v-songbaohua@oppo.com>
> > ---
> >  drivers/vdpa/vdpa_user/iova_domain.c | 21 ++++++++++++++++-----
> >  drivers/vdpa/vdpa_user/iova_domain.h |  3 ++-
> >  drivers/vdpa/vdpa_user/vduse_dev.c   | 13 ++++++++++++-
> >  3 files changed, 30 insertions(+), 7 deletions(-)
> >
> > diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
> > index 791d38d6284c..014809ac2b7c 100644
> > --- a/drivers/vdpa/vdpa_user/iova_domain.c
> > +++ b/drivers/vdpa/vdpa_user/iova_domain.c
> > @@ -283,7 +283,7 @@ int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
> >         return ret;
> >  }
> >
> > -void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
> > +void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain, struct page **pages)
> >  {
> >         struct vduse_bounce_map *map;
> >         unsigned long i, count;
> > @@ -294,15 +294,16 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
> >
> >         count = domain->bounce_size >> PAGE_SHIFT;
> >         for (i = 0; i < count; i++) {
> > -               struct page *page = NULL;
> > +               struct page *page = pages[i];
> >
> >                 map = &domain->bounce_maps[i];
> > -               if (WARN_ON(!map->bounce_page))
> > +               if (WARN_ON(!map->bounce_page)) {
> > +                       put_page(page);
> >                         continue;
> > +               }
> >
> >                 /* Copy user page to kernel page if it's in use */
> >                 if (map->orig_phys != INVALID_PHYS_ADDR) {
> > -                       page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
> >                         memcpy_from_page(page_address(page),
> >                                          map->bounce_page, 0, PAGE_SIZE);
> >                 }
> > @@ -543,10 +544,19 @@ static int vduse_domain_mmap(struct file *file, struct vm_area_struct *vma)
> >  static int vduse_domain_release(struct inode *inode, struct file *file)
> >  {
> >         struct vduse_iova_domain *domain = file->private_data;
> > +       struct page **pages = NULL;
> > +       unsigned long count, i;
> > +
> > +       if (domain->user_bounce_pages) {
> > +               count = domain->bounce_size >> PAGE_SHIFT;
> > +               pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
> > +               for (i = 0; i < count; i++)
> > +                       pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
> > +       }
> >
> >         spin_lock(&domain->iotlb_lock);
> >         vduse_iotlb_del_range(domain, 0, ULLONG_MAX);
> > -       vduse_domain_remove_user_bounce_pages(domain);
> > +       vduse_domain_remove_user_bounce_pages(domain, pages);
> >         vduse_domain_free_kernel_bounce_pages(domain);
> >         spin_unlock(&domain->iotlb_lock);
> >         put_iova_domain(&domain->stream_iovad);
> > @@ -554,6 +564,7 @@ static int vduse_domain_release(struct inode *inode, struct file *file)
> >         vhost_iotlb_free(domain->iotlb);
> >         vfree(domain->bounce_maps);
> >         kfree(domain);
> > +       kfree(pages);
> >
> >         return 0;
> >  }
> > diff --git a/drivers/vdpa/vdpa_user/iova_domain.h b/drivers/vdpa/vdpa_user/iova_domain.h
> > index f92f22a7267d..db0b793d86db 100644
> > --- a/drivers/vdpa/vdpa_user/iova_domain.h
> > +++ b/drivers/vdpa/vdpa_user/iova_domain.h
> > @@ -74,7 +74,8 @@ void vduse_domain_reset_bounce_map(struct vduse_iova_domain *domain);
> >  int vduse_domain_add_user_bounce_pages(struct vduse_iova_domain *domain,
> >                                        struct page **pages, int count);
> >
> > -void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain);
> > +void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain,
> > +                                          struct page **pages);
> >
> >  void vduse_domain_destroy(struct vduse_iova_domain *domain);
> >
> > diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c
> > index 7ae99691efdf..df7c1b6f1350 100644
> > --- a/drivers/vdpa/vdpa_user/vduse_dev.c
> > +++ b/drivers/vdpa/vdpa_user/vduse_dev.c
> > @@ -1030,6 +1030,8 @@ static int vduse_dev_queue_irq_work(struct vduse_dev *dev,
> >  static int vduse_dev_dereg_umem(struct vduse_dev *dev,
> >                                 u64 iova, u64 size)
> >  {
> > +       struct page **pages = NULL;
> > +       unsigned long count, i;
> >         int ret;
> >
> >         mutex_lock(&dev->mem_lock);
> > @@ -1044,13 +1046,22 @@ static int vduse_dev_dereg_umem(struct vduse_dev *dev,
> >         if (dev->umem->iova != iova || size != dev->domain->bounce_size)
> >                 goto unlock;
> >
> > -       vduse_domain_remove_user_bounce_pages(dev->domain);
> > +       if (dev->domain->user_bounce_pages) {
> > +               count = dev->domain->bounce_size >> PAGE_SHIFT;
> > +               pages = kmalloc_array(count, sizeof(*pages),
> > +                                     GFP_KERNEL | __GFP_NOFAIL);
> > +               for (i = 0; i < count; i++)
> > +                       pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
> > +       }
>
> Nit: there's some code duplication with vduse_domain_release().
>
> Others look good to me.
>
> Would you like to post a formal patch?

Jason, thanks!

I haven't tested this patch and I don't have the setup to test it. I
wonder if you could provide a Tested-by tag before I send a formal patch.

BTW, if we want a common function to remove the duplicated code, what
name would you suggest for it?
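
For reference, the duplicated block is just the NOFAIL preallocation, so
the helper could be something like this sketch (the name
vduse_domain_alloc_pages_nofail is only a placeholder):

	static struct page **vduse_domain_alloc_pages_nofail(unsigned long count)
	{
		struct page **pages;
		unsigned long i;

		/* only safe from a sleepable context */
		pages = kmalloc_array(count, sizeof(*pages),
				      GFP_KERNEL | __GFP_NOFAIL);
		for (i = 0; i < count; i++)
			pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);

		return pages;
	}

Both vduse_domain_release() and vduse_dev_dereg_umem() could then call it
before taking their locks.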

>
> Thanks
>
> > +
> > +       vduse_domain_remove_user_bounce_pages(dev->domain, pages);
> >         unpin_user_pages_dirty_lock(dev->umem->pages,
> >                                     dev->umem->npages, true);
> >         atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm);
> >         mmdrop(dev->umem->mm);
> >         vfree(dev->umem->pages);
> >         kfree(dev->umem);
> > +       kfree(pages);
> >         dev->umem = NULL;
> >         ret = 0;
> >  unlock:
> > --
> > 2.34.1
> >
> > >
> > > Thanks
> > >
> > > >
> > > >
> > > > > --
> > > > > Michal Hocko
> > > > > SUSE Labs
> > > >
> >
> > Thanks
> > Barry
> >
>

Thanks
Barry
diff mbox series

Patch

diff --git a/drivers/vdpa/vdpa_user/iova_domain.c b/drivers/vdpa/vdpa_user/iova_domain.c
index 791d38d6284c..eff700e5f7a2 100644
--- a/drivers/vdpa/vdpa_user/iova_domain.c
+++ b/drivers/vdpa/vdpa_user/iova_domain.c
@@ -287,28 +287,44 @@ void vduse_domain_remove_user_bounce_pages(struct vduse_iova_domain *domain)
 {
 	struct vduse_bounce_map *map;
 	unsigned long i, count;
+	struct page **pages = NULL;
 
 	write_lock(&domain->bounce_lock);
 	if (!domain->user_bounce_pages)
 		goto out;
-
 	count = domain->bounce_size >> PAGE_SHIFT;
+	write_unlock(&domain->bounce_lock);
+
+	pages = kmalloc_array(count, sizeof(*pages), GFP_KERNEL | __GFP_NOFAIL);
+	for (i = 0; i < count; i++)
+		pages[i] = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
+
+	write_lock(&domain->bounce_lock);
+	if (!domain->user_bounce_pages) {
+		for (i = 0; i < count; i++)
+			put_page(pages[i]);
+		kfree(pages);
+		goto out;
+	}
+
 	for (i = 0; i < count; i++) {
-		struct page *page = NULL;
+		struct page *page = pages[i];
 
 		map = &domain->bounce_maps[i];
-		if (WARN_ON(!map->bounce_page))
+		if (WARN_ON(!map->bounce_page)) {
+			put_page(page);
 			continue;
+		}
 
 		/* Copy user page to kernel page if it's in use */
 		if (map->orig_phys != INVALID_PHYS_ADDR) {
-			page = alloc_page(GFP_ATOMIC | __GFP_NOFAIL);
 			memcpy_from_page(page_address(page),
 					 map->bounce_page, 0, PAGE_SIZE);
 		}
 		put_page(map->bounce_page);
 		map->bounce_page = page;
 	}
+	kfree(pages);
 	domain->user_bounce_pages = false;
 out:
 	write_unlock(&domain->bounce_lock);