@@ -189,7 +189,7 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
for (i = 0; i < vsg->num_pages; ++i) {
if (NULL != (page = vsg->pages[i])) {
if (!PageReserved(page) && (DMA_FROM_DEVICE == vsg->direction))
- put_user_pages_dirty(&page, 1);
+ put_user_page_dirty(page);
else
put_user_page(page);
}
@@ -55,7 +55,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
for_each_sg_page(umem->sg_head.sgl, &sg_iter, umem->sg_nents, 0) {
page = sg_page_iter_page(&sg_iter);
if (umem->writable && dirty)
- put_user_pages_dirty_lock(&page, 1);
+ put_user_page_dirty_lock(page);
else
put_user_page(page);
}
@@ -76,7 +76,7 @@ static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
page = sg_page(sg);
pa = sg_phys(sg);
if (dirty)
- put_user_pages_dirty_lock(&page, 1);
+ put_user_page_dirty_lock(page);
else
put_user_page(page);
usnic_dbg("pa: %pa\n", &pa);
@@ -1061,6 +1061,16 @@ void put_user_pages_dirty(struct page **pages, unsigned long npages);
void put_user_pages_dirty_lock(struct page **pages, unsigned long npages);
void put_user_pages(struct page **pages, unsigned long npages);
+/**
+ * put_user_page_dirty() - release a single gup-pinned page, marking it dirty
+ * @page: the page to release
+ *
+ * One-page convenience wrapper around put_user_pages_dirty(), so callers
+ * releasing exactly one page need not build a one-element struct page *
+ * array (the &page-on-stack pattern this patch removes at call sites).
+ */
+static inline void put_user_page_dirty(struct page *page)
+{
+	put_user_pages_dirty(&page, 1);
+}
+
+/**
+ * put_user_page_dirty_lock() - release a single gup-pinned page, marking it
+ * dirty via the set_page_dirty_lock() path
+ * @page: the page to release
+ *
+ * One-page convenience wrapper around put_user_pages_dirty_lock(), for
+ * callers that must use the locked dirty-setting variant but only have a
+ * single page to drop.
+ */
+static inline void put_user_page_dirty_lock(struct page *page)
+{
+	put_user_pages_dirty_lock(&page, 1);
+}
+
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
#endif
@@ -171,7 +171,7 @@ static void xdp_umem_unpin_pages(struct xdp_umem *umem)
for (i = 0; i < umem->npgs; i++) {
struct page *page = umem->pgs[i];
- put_user_pages_dirty_lock(&page, 1);
+ put_user_page_dirty_lock(page);
}
kfree(umem->pgs);