Message ID | 20240909091851.1165742-4-link@vivo.com |
---|---|
State | New |
Series | udmabuf bug fix and some improvements |
Hi Huan,

> Subject: [PATCH v6 3/7] udmabuf: fix vmap_udmabuf error page set
>
> Currently, vmap_udmabuf fills its page array from each folio, but
> ubuf->folios only holds each folio's head page.
>
> That means we repeatedly map the folio's head page into the vmalloc area.
>
> Since udmabuf can be backed by hugetlb, tail pages may not exist when HVO
> is enabled, so we can't map via a page array; use a pfn array instead.
>
> With this, page usage is removed from udmabuf entirely.
>
I think this would probably need a Fixes tag:
Fixes: 5e72b2b41a21 ("udmabuf: convert udmabuf driver to use folios")

Thanks,
Vivek

> Suggested-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
> Signed-off-by: Huan Yang <link@vivo.com>
> Acked-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
> ---
>  drivers/dma-buf/Kconfig   |  1 +
>  drivers/dma-buf/udmabuf.c | 22 +++++++++++++++-------
>  2 files changed, 16 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
> index b46eb8a552d7..fee04fdb0822 100644
> --- a/drivers/dma-buf/Kconfig
> +++ b/drivers/dma-buf/Kconfig
> @@ -36,6 +36,7 @@ config UDMABUF
>  	depends on DMA_SHARED_BUFFER
>  	depends on MEMFD_CREATE || COMPILE_TEST
>  	depends on MMU
> +	select VMAP_PFN
>  	help
>  	  A driver to let userspace turn memfd regions into dma-bufs.
>  	  Qemu can use this to create host dmabufs for guest framebuffers.
> diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
> index ba9dbc7caf71..aa182a9dcdfa 100644
> --- a/drivers/dma-buf/udmabuf.c
> +++ b/drivers/dma-buf/udmabuf.c
> @@ -103,21 +103,29 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
>  static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
>  {
>  	struct udmabuf *ubuf = buf->priv;
> -	struct page **pages;
> +	unsigned long *pfns;
>  	void *vaddr;
>  	pgoff_t pg;
>
>  	dma_resv_assert_held(buf->resv);
>
> -	pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
> -	if (!pages)
> +	/**
> +	 * HVO may free tail pages, so just use pfn to map each folio
> +	 * into vmalloc area.
> +	 */
> +	pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
> +	if (!pfns)
>  		return -ENOMEM;
>
> -	for (pg = 0; pg < ubuf->pagecount; pg++)
> -		pages[pg] = &ubuf->folios[pg]->page;
> +	for (pg = 0; pg < ubuf->pagecount; pg++) {
> +		unsigned long pfn = folio_pfn(ubuf->folios[pg]);
>
> -	vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
> -	kvfree(pages);
> +		pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
> +		pfns[pg] = pfn;
> +	}
> +
> +	vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
> +	kvfree(pfns);
>  	if (!vaddr)
>  		return -EINVAL;
>
> --
> 2.45.2
On 2024/9/13 6:37, Kasireddy, Vivek wrote:
> Hi Huan,
>
>> Subject: [PATCH v6 3/7] udmabuf: fix vmap_udmabuf error page set
>>
>> Currently, vmap_udmabuf fills its page array from each folio, but
>> ubuf->folios only holds each folio's head page.
>>
>> That means we repeatedly map the folio's head page into the vmalloc area.
>>
>> Since udmabuf can be backed by hugetlb, tail pages may not exist when HVO
>> is enabled, so we can't map via a page array; use a pfn array instead.
>>
>> With this, page usage is removed from udmabuf entirely.
>>
> I think this would probably need a Fixes tag:
> Fixes: 5e72b2b41a21 ("udmabuf: convert udmabuf driver to use folios")

OK, I'll update it.

> Thanks,
> Vivek
>
>> Suggested-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
>> Signed-off-by: Huan Yang <link@vivo.com>
>> Acked-by: Vivek Kasireddy <vivek.kasireddy@intel.com>
>> ---
>>  drivers/dma-buf/Kconfig   |  1 +
>>  drivers/dma-buf/udmabuf.c | 22 +++++++++++++++-------
>>  2 files changed, 16 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
>> index b46eb8a552d7..fee04fdb0822 100644
>> --- a/drivers/dma-buf/Kconfig
>> +++ b/drivers/dma-buf/Kconfig
>> @@ -36,6 +36,7 @@ config UDMABUF
>>  	depends on DMA_SHARED_BUFFER
>>  	depends on MEMFD_CREATE || COMPILE_TEST
>>  	depends on MMU
>> +	select VMAP_PFN
>>  	help
>>  	  A driver to let userspace turn memfd regions into dma-bufs.
>>  	  Qemu can use this to create host dmabufs for guest framebuffers.
>> diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
>> index ba9dbc7caf71..aa182a9dcdfa 100644
>> --- a/drivers/dma-buf/udmabuf.c
>> +++ b/drivers/dma-buf/udmabuf.c
>> @@ -103,21 +103,29 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
>>  static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
>>  {
>>  	struct udmabuf *ubuf = buf->priv;
>> -	struct page **pages;
>> +	unsigned long *pfns;
>>  	void *vaddr;
>>  	pgoff_t pg;
>>
>>  	dma_resv_assert_held(buf->resv);
>>
>> -	pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
>> -	if (!pages)
>> +	/**
>> +	 * HVO may free tail pages, so just use pfn to map each folio
>> +	 * into vmalloc area.
>> +	 */
>> +	pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
>> +	if (!pfns)
>>  		return -ENOMEM;
>>
>> -	for (pg = 0; pg < ubuf->pagecount; pg++)
>> -		pages[pg] = &ubuf->folios[pg]->page;
>> +	for (pg = 0; pg < ubuf->pagecount; pg++) {
>> +		unsigned long pfn = folio_pfn(ubuf->folios[pg]);
>>
>> -	vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
>> -	kvfree(pages);
>> +		pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
>> +		pfns[pg] = pfn;
>> +	}
>> +
>> +	vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
>> +	kvfree(pfns);
>>  	if (!vaddr)
>>  		return -EINVAL;
>>
>> --
>> 2.45.2
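To make the head-page problem discussed in the commit message concrete, here is a small user-space C sketch (illustration only, with made-up pfn and offset values; it is not the kernel code itself): under the old scheme every array entry resolved to the folio's head page, while the pfn arithmetic from the patch advances each entry by offsets[pg] >> PAGE_SHIFT.

/*
 * Illustration only: simulates the index math from the patch with
 * hypothetical values; this is plain user-space C, not kernel code.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

int main(void)
{
	/* Pretend one 8-page folio backs the whole buffer (hypothetical values). */
	unsigned long folio_head_pfn = 0x1000;	/* pfn of the folio's head page */
	unsigned long pagecount = 8;
	unsigned long offsets[8];		/* analogue of ubuf->offsets: byte offset into the folio */
	unsigned long pg;

	for (pg = 0; pg < pagecount; pg++)
		offsets[pg] = pg * PAGE_SIZE;

	for (pg = 0; pg < pagecount; pg++) {
		/* Old scheme: every entry pointed at the folio's head page. */
		unsigned long old_pfn = folio_head_pfn;
		/* New scheme: advance by the per-entry offset, as the patched loop does. */
		unsigned long new_pfn = folio_head_pfn + (offsets[pg] >> PAGE_SHIFT);

		printf("entry %lu: old pfn %#lx, new pfn %#lx\n", pg, old_pfn, new_pfn);
	}

	return 0;
}

Running this prints the same old pfn (0x1000) for all eight entries but consecutive new pfns (0x1000 through 0x1007), which is exactly the distinction the commit message describes.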
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index b46eb8a552d7..fee04fdb0822 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -36,6 +36,7 @@ config UDMABUF
 	depends on DMA_SHARED_BUFFER
 	depends on MEMFD_CREATE || COMPILE_TEST
 	depends on MMU
+	select VMAP_PFN
 	help
 	  A driver to let userspace turn memfd regions into dma-bufs.
 	  Qemu can use this to create host dmabufs for guest framebuffers.
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index ba9dbc7caf71..aa182a9dcdfa 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -103,21 +103,29 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
 static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
 {
 	struct udmabuf *ubuf = buf->priv;
-	struct page **pages;
+	unsigned long *pfns;
 	void *vaddr;
 	pgoff_t pg;
 
 	dma_resv_assert_held(buf->resv);
 
-	pages = kvmalloc_array(ubuf->pagecount, sizeof(*pages), GFP_KERNEL);
-	if (!pages)
+	/**
+	 * HVO may free tail pages, so just use pfn to map each folio
+	 * into vmalloc area.
+	 */
+	pfns = kvmalloc_array(ubuf->pagecount, sizeof(*pfns), GFP_KERNEL);
+	if (!pfns)
 		return -ENOMEM;
 
-	for (pg = 0; pg < ubuf->pagecount; pg++)
-		pages[pg] = &ubuf->folios[pg]->page;
+	for (pg = 0; pg < ubuf->pagecount; pg++) {
+		unsigned long pfn = folio_pfn(ubuf->folios[pg]);
 
-	vaddr = vm_map_ram(pages, ubuf->pagecount, -1);
-	kvfree(pages);
+		pfn += ubuf->offsets[pg] >> PAGE_SHIFT;
+		pfns[pg] = pfn;
+	}
+
+	vaddr = vmap_pfn(pfns, ubuf->pagecount, PAGE_KERNEL);
+	kvfree(pfns);
 	if (!vaddr)
 		return -EINVAL;
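For context on how this path is reached: in-kernel consumers hit vmap_udmabuf() through the dma-buf vmap interface. A minimal sketch of such a caller might look like the following (assumes a kernel that provides the unlocked dma-buf vmap helpers; the example_* function names are hypothetical and this snippet is not part of the patch):

#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/iosys-map.h>

/*
 * Sketch of a kernel-side consumer mapping a udmabuf into the kernel
 * address space. dma_buf_vmap_unlocked() takes the reservation lock and
 * ends up calling the exporter's .vmap callback (vmap_udmabuf() here);
 * the resulting vmalloc address is returned in *map.
 */
static void *example_map_udmabuf(struct dma_buf *dmabuf, struct iosys_map *map)
{
	int ret;

	ret = dma_buf_vmap_unlocked(dmabuf, map);
	if (ret)
		return ERR_PTR(ret);

	return map->vaddr;
}

static void example_unmap_udmabuf(struct dma_buf *dmabuf, struct iosys_map *map)
{
	dma_buf_vunmap_unlocked(dmabuf, map);
}

With the fix in place, the address such a caller gets back covers every page of the hugetlb-backed folios rather than the head page repeated, so accesses through it land in the intended memory.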