Context                               | Check   | Description
bjorn/pre-ci_am                       | success | Success
bjorn/build-rv32-defconfig            | success | build-rv32-defconfig
bjorn/build-rv64-clang-allmodconfig   | success | build-rv64-clang-allmodconfig
bjorn/build-rv64-gcc-allmodconfig     | success | build-rv64-gcc-allmodconfig
bjorn/build-rv64-nommu-k210-defconfig | success | build-rv64-nommu-k210-defconfig
bjorn/build-rv64-nommu-k210-virt      | success | build-rv64-nommu-k210-virt
bjorn/checkpatch                      | success | checkpatch
bjorn/dtb-warn-rv64                   | success | dtb-warn-rv64
bjorn/header-inline                   | success | header-inline
bjorn/kdoc                            | success | kdoc
bjorn/module-param                    | success | module-param
bjorn/verify-fixes                    | success | verify-fixes
bjorn/verify-signedoff                | success | verify-signedoff
@@ -46,40 +46,6 @@ static inline void __iommu_free_account(struct page *page, int order)
 	mod_lruvec_page_state(page, NR_SECONDARY_PAGETABLE, -pgcnt);
 }
 
-/**
- * __iommu_alloc_pages - allocate a zeroed page of a given order.
- * @gfp: buddy allocator flags
- * @order: page order
- *
- * returns the head struct page of the allocated page.
- */
-static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order)
-{
-	struct page *page;
-
-	page = alloc_pages(gfp | __GFP_ZERO, order);
-	if (unlikely(!page))
-		return NULL;
-
-	__iommu_alloc_account(page, order);
-
-	return page;
-}
-
-/**
- * __iommu_free_pages - free page of a given order
- * @page: head struct page of the page
- * @order: page order
- */
-static inline void __iommu_free_pages(struct page *page, int order)
-{
-	if (!page)
-		return;
-
-	__iommu_free_account(page, order);
-	__free_pages(page, order);
-}
-
 /**
  * iommu_alloc_pages_node - allocate a zeroed page of a given order from
  * specific NUMA node.
@@ -110,12 +76,7 @@ static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
  */
 static inline void *iommu_alloc_pages(gfp_t gfp, int order)
 {
-	struct page *page = __iommu_alloc_pages(gfp, order);
-
-	if (unlikely(!page))
-		return NULL;
-
-	return page_address(page);
+	return iommu_alloc_pages_node(numa_node_id(), gfp, order);
 }
 
 /**
@@ -138,7 +99,7 @@ static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
  */
 static inline void *iommu_alloc_page(gfp_t gfp)
 {
-	return iommu_alloc_pages(gfp, 0);
+	return iommu_alloc_pages_node(numa_node_id(), gfp, 0);
 }
 
 /**
@@ -148,10 +109,14 @@ static inline void *iommu_alloc_page(gfp_t gfp)
  */
 static inline void iommu_free_pages(void *virt, int order)
 {
+	struct page *page;
+
 	if (!virt)
 		return;
 
-	__iommu_free_pages(virt_to_page(virt), order);
+	page = virt_to_page(virt);
+	__iommu_free_account(page, order);
+	__free_pages(page, order);
 }
/**
These were only used by tegra-smmu and leaked the struct page out of the
API. Delete them since tegra-smmu has been converted to the other APIs.

In the process, flatten the call tree so we have fewer one-line functions
calling other one-line functions: iommu_alloc_pages_node() is the real
allocator and everything else can just call it directly.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/iommu-pages.h | 49 ++++++-------------------------------
 1 file changed, 7 insertions(+), 42 deletions(-)
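As a usage illustration (not part of this patch), here is a minimal sketch of how a caller could sit on top of the flattened API, with iommu_alloc_pages_node() as the one real allocator. The example function names and the use of dev_to_node() are assumptions made only for the sketch:

/*
 * Hypothetical sketch, assuming the post-patch iommu-pages.h helpers
 * shown above; this is not code from the series.
 */
#include <linux/device.h>
#include <linux/gfp.h>

#include "iommu-pages.h"

/* Allocate one zeroed page-table page close to the device's NUMA node. */
static void *example_alloc_pgtable(struct device *dev, gfp_t gfp)
{
        return iommu_alloc_pages_node(dev_to_node(dev), gfp, 0);
}

/* Release it again; the order must match the allocation (0 == one page). */
static void example_free_pgtable(void *pgtable)
{
        iommu_free_pages(pgtable, 0);
}

NUMA-unaware callers can keep using iommu_alloc_pages()/iommu_alloc_page(), which after this patch simply forward to iommu_alloc_pages_node(numa_node_id(), ...).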