diff mbox series

binder_alloc: Add missing mmap_lock calls when using the VMA

Message ID 20220809160618.1052539-1-Liam.Howlett@oracle.com (mailing list archive)
State New
Headers show
Series binder_alloc: Add missing mmap_lock calls when using the VMA | expand

Commit Message

Liam R. Howlett Aug. 9, 2022, 4:06 p.m. UTC
Take the mmap_read_lock() when using the VMA in
binder_alloc_print_pages() and when checking for a VMA in
binder_alloc_new_buf_locked().

It is worth noting that binder_alloc_new_buf_locked() drops the mmap read
lock after it verifies a VMA exists, but the lock may be taken again
deeper in the call stack, if necessary.

Reported-by: Ondrej Mosnacek <omosnace@redhat.com>
Reported-by: syzbot+a7b60a176ec13cafb793@syzkaller.appspotmail.com
Fixes: a43cfc87caaf ("android: binder: stop saving a pointer to the VMA")
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
---
 drivers/android/binder_alloc.c | 29 +++++++++++++++++++----------
 1 file changed, 19 insertions(+), 10 deletions(-)

Comments

Carlos Llamas Aug. 9, 2022, 6:49 p.m. UTC | #1
On Tue, Aug 09, 2022 at 04:06:31PM +0000, Liam Howlett wrote:
> Take the mmap_read_lock() when using the VMA in
> binder_alloc_print_pages() and when checking for a VMA in
> binder_alloc_new_buf_locked().
> 
> It is worth noting binder_alloc_new_buf_locked() drops the VMA read lock
> after it verifies a VMA exists, but may be taken again deeper in the
> call stack, if necessary.
> 
> Reported-by: Ondrej Mosnacek <omosnace@redhat.com>
> Reported-by: syzbot+a7b60a176ec13cafb793@syzkaller.appspotmail.com
> Fixes: a43cfc87caaf (android: binder: stop saving a pointer to the VMA)
> Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
> ---
>  drivers/android/binder_alloc.c | 29 +++++++++++++++++++----------
>  1 file changed, 19 insertions(+), 10 deletions(-)
> 
> diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
> index f555eebceef6..c23cad7fe313 100644
> --- a/drivers/android/binder_alloc.c
> +++ b/drivers/android/binder_alloc.c
> @@ -395,12 +395,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
>  	size_t size, data_offsets_size;
>  	int ret;
>  
> +	mmap_read_lock(alloc->vma_vm_mm);
>  	if (!binder_alloc_get_vma(alloc)) {
> +		mmap_read_unlock(alloc->vma_vm_mm);
>  		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
>  				   "%d: binder_alloc_buf, no vma\n",
>  				   alloc->pid);
>  		return ERR_PTR(-ESRCH);
>  	}
> +	mmap_read_unlock(alloc->vma_vm_mm);
>  
>  	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
>  		ALIGN(offsets_size, sizeof(void *));
> @@ -922,17 +925,23 @@ void binder_alloc_print_pages(struct seq_file *m,
>  	 * Make sure the binder_alloc is fully initialized, otherwise we might
>  	 * read inconsistent state.
>  	 */
> -	if (binder_alloc_get_vma(alloc) != NULL) {
> -		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
> -			page = &alloc->pages[i];
> -			if (!page->page_ptr)
> -				free++;
> -			else if (list_empty(&page->lru))
> -				active++;
> -			else
> -				lru++;
> -		}
> +
> +	mmap_read_lock(alloc->vma_vm_mm);
> +	if (binder_alloc_get_vma(alloc) == NULL)
> +		goto uninitialized;
> +
> +	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {

do we need to hold on to the lock while we loop through the pages here?

> +		page = &alloc->pages[i];
> +		if (!page->page_ptr)
> +			free++;
> +		else if (list_empty(&page->lru))
> +			active++;
> +		else
> +			lru++;
>  	}
> +
> +uninitialized:
> +	mmap_read_unlock(alloc->vma_vm_mm);
>  	mutex_unlock(&alloc->mutex);
>  	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
>  	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
> -- 
> 2.35.1
Liam R. Howlett Aug. 9, 2022, 7:02 p.m. UTC | #2
* Carlos Llamas <cmllamas@google.com> [220809 14:49]:
> On Tue, Aug 09, 2022 at 04:06:31PM +0000, Liam Howlett wrote:
> > Take the mmap_read_lock() when using the VMA in
> > binder_alloc_print_pages() and when checking for a VMA in
> > binder_alloc_new_buf_locked().
> > 
> > It is worth noting binder_alloc_new_buf_locked() drops the VMA read lock
> > after it verifies a VMA exists, but may be taken again deeper in the
> > call stack, if necessary.
> > 
> > Reported-by: Ondrej Mosnacek <omosnace@redhat.com>
> > Reported-by: syzbot+a7b60a176ec13cafb793@syzkaller.appspotmail.com
> > Fixes: a43cfc87caaf (android: binder: stop saving a pointer to the VMA)
> > Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
> > ---
> >  drivers/android/binder_alloc.c | 29 +++++++++++++++++++----------
> >  1 file changed, 19 insertions(+), 10 deletions(-)
> > 
> > diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
> > index f555eebceef6..c23cad7fe313 100644
> > --- a/drivers/android/binder_alloc.c
> > +++ b/drivers/android/binder_alloc.c
> > @@ -395,12 +395,15 @@ static struct binder_buffer *binder_alloc_new_buf_locked(
> >  	size_t size, data_offsets_size;
> >  	int ret;
> >  
> > +	mmap_read_lock(alloc->vma_vm_mm);
> >  	if (!binder_alloc_get_vma(alloc)) {
> > +		mmap_read_unlock(alloc->vma_vm_mm);
> >  		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
> >  				   "%d: binder_alloc_buf, no vma\n",
> >  				   alloc->pid);
> >  		return ERR_PTR(-ESRCH);
> >  	}
> > +	mmap_read_unlock(alloc->vma_vm_mm);
> >  
> >  	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
> >  		ALIGN(offsets_size, sizeof(void *));
> > @@ -922,17 +925,23 @@ void binder_alloc_print_pages(struct seq_file *m,
> >  	 * Make sure the binder_alloc is fully initialized, otherwise we might
> >  	 * read inconsistent state.
> >  	 */
> > -	if (binder_alloc_get_vma(alloc) != NULL) {
> > -		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
> > -			page = &alloc->pages[i];
> > -			if (!page->page_ptr)
> > -				free++;
> > -			else if (list_empty(&page->lru))
> > -				active++;
> > -			else
> > -				lru++;
> > -		}
> > +
> > +	mmap_read_lock(alloc->vma_vm_mm);
> > +	if (binder_alloc_get_vma(alloc) == NULL)
> > +		goto uninitialized;
> > +
> > +	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
> 
> do we need to hold on to the lock while we loop through the pages here?

I think we do?  Holding this lock will ensure the pages don't go away, I
believe (looking at mm/rmap.c comments on locking at the top)?

In any case, this function is called from print_binder_proc_stats()
which looks to be a debugfs/debugging call, so I thought safer would be
better than faster with a potential race.

> 
> > +		page = &alloc->pages[i];
> > +		if (!page->page_ptr)
> > +			free++;
> > +		else if (list_empty(&page->lru))
> > +			active++;
> > +		else
> > +			lru++;
> >  	}
> > +
> > +uninitialized:
> > +	mmap_read_unlock(alloc->vma_vm_mm);
> >  	mutex_unlock(&alloc->mutex);
> >  	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
> >  	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);
> > -- 
> > 2.35.1
Carlos Llamas Aug. 9, 2022, 9:01 p.m. UTC | #3
On Tue, Aug 09, 2022 at 07:02:17PM +0000, Liam Howlett wrote:
> > 
> > do we need to hold on to the lock while we loop through the pages here?
> 
> I think we do?  Holding this lock will ensure the pages don't go away, I
> believe (looking at mm/rmap.c comments on locking at the top)?
> 
> In any case, this function is called from print_binder_proc_stats()
> which looks to be a debugfs/debugging call so I thought safer would be
> better than faster and with a potential race.

The pages are protected by alloc->mutex, so you could immediately
release the mmap lock after binder_alloc_get_vma() call. I agree this
is a debugging call so it would be nice to reduce contention.
Andrew Morton Aug. 10, 2022, 1:30 a.m. UTC | #4
On Tue, 9 Aug 2022 21:01:55 +0000 Carlos Llamas <cmllamas@google.com> wrote:

> On Tue, Aug 09, 2022 at 07:02:17PM +0000, Liam Howlett wrote:
> > > 
> > > do we need to hold on to the lock while we loop through the pages here?
> > 
> > I think we do?  Holding this lock will ensure the pages don't go away, I
> > believe (looking at mm/rmap.c comments on locking at the top)?
> > 
> > In any case, this function is called from print_binder_proc_stats()
> > which looks to be a debugfs/debugging call so I thought safer would be
> > better than faster and with a potential race.
> 
> The pages are protected by alloc->mutex, so you could immediately
> release the mmap lock after binder_alloc_get_vma() call. I agree this
> is a debugging call so it would be nice to reduce contention.

I'll queue this patch for testing, shall update it if there's a v2.
Liam R. Howlett Aug. 10, 2022, 2:04 a.m. UTC | #5
* Carlos Llamas <cmllamas@google.com> [220809 17:02]:
> On Tue, Aug 09, 2022 at 07:02:17PM +0000, Liam Howlett wrote:
> > > 
> > > do we need to hold on to the lock while we loop through the pages here?
> > 
> > I think we do?  Holding this lock will ensure the pages don't go away, I
> > believe (looking at mm/rmap.c comments on locking at the top)?
> > 
> > In any case, this function is called from print_binder_proc_stats()
> > which looks to be a debugfs/debugging call so I thought safer would be
> > better than faster and with a potential race.
> 
> The pages are protected by alloc->mutex, so you could immediately
> release the mmap lock after binder_alloc_get_vma() call. I agree this
> is a debugging call so it would be nice to reduce contention.

Oh, I see.  The reuse of page confused me here.  Yes, you are correct.

I can re-spin this patch.

Thanks,
Liam
diff mbox series

Patch

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index f555eebceef6..c23cad7fe313 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -395,12 +395,15 @@  static struct binder_buffer *binder_alloc_new_buf_locked(
 	size_t size, data_offsets_size;
 	int ret;
 
+	mmap_read_lock(alloc->vma_vm_mm);
 	if (!binder_alloc_get_vma(alloc)) {
+		mmap_read_unlock(alloc->vma_vm_mm);
 		binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
 				   "%d: binder_alloc_buf, no vma\n",
 				   alloc->pid);
 		return ERR_PTR(-ESRCH);
 	}
+	mmap_read_unlock(alloc->vma_vm_mm);
 
 	data_offsets_size = ALIGN(data_size, sizeof(void *)) +
 		ALIGN(offsets_size, sizeof(void *));
@@ -922,17 +925,23 @@  void binder_alloc_print_pages(struct seq_file *m,
 	 * Make sure the binder_alloc is fully initialized, otherwise we might
 	 * read inconsistent state.
 	 */
-	if (binder_alloc_get_vma(alloc) != NULL) {
-		for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
-			page = &alloc->pages[i];
-			if (!page->page_ptr)
-				free++;
-			else if (list_empty(&page->lru))
-				active++;
-			else
-				lru++;
-		}
+
+	mmap_read_lock(alloc->vma_vm_mm);
+	if (binder_alloc_get_vma(alloc) == NULL)
+		goto uninitialized;
+
+	for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
+		page = &alloc->pages[i];
+		if (!page->page_ptr)
+			free++;
+		else if (list_empty(&page->lru))
+			active++;
+		else
+			lru++;
 	}
+
+uninitialized:
+	mmap_read_unlock(alloc->vma_vm_mm);
 	mutex_unlock(&alloc->mutex);
 	seq_printf(m, "  pages: %d:%d:%d\n", active, lru, free);
 	seq_printf(m, "  pages high watermark: %zu\n", alloc->pages_high);