diff mbox series

[v2,23/23] iommu/pages: Remove iommu_alloc_pages_node()

Message ID 23-v2-545d29711869+a76b5-iommu_pages_jgg@nvidia.com (mailing list archive)
State Handled Elsewhere
Headers show
Series iommu: Further abstract iommu-pages | expand

Checks

Context Check Description
bjorn/pre-ci_am success Success
bjorn/build-rv32-defconfig success build-rv32-defconfig
bjorn/build-rv64-clang-allmodconfig success build-rv64-clang-allmodconfig
bjorn/build-rv64-gcc-allmodconfig success build-rv64-gcc-allmodconfig
bjorn/build-rv64-nommu-k210-defconfig success build-rv64-nommu-k210-defconfig
bjorn/build-rv64-nommu-k210-virt success build-rv64-nommu-k210-virt
bjorn/checkpatch success checkpatch
bjorn/dtb-warn-rv64 success dtb-warn-rv64
bjorn/header-inline success header-inline
bjorn/kdoc success kdoc
bjorn/module-param success module-param
bjorn/verify-fixes success verify-fixes
bjorn/verify-signedoff success verify-signedoff

Commit Message

Jason Gunthorpe Feb. 14, 2025, 5:07 p.m. UTC
Intel is the only driver that uses this now; convert it to the size-based
versions, trying to avoid PAGE_SHIFT.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/intel/iommu.h         |  7 +++----
 drivers/iommu/intel/irq_remapping.c |  7 +++----
 drivers/iommu/intel/pasid.c         |  3 ++-
 drivers/iommu/intel/prq.c           |  3 ++-
 drivers/iommu/iommu-pages.h         | 16 ----------------
 5 files changed, 10 insertions(+), 26 deletions(-)

Comments

Baolu Lu Feb. 15, 2025, 9:47 a.m. UTC | #1
On 2/15/25 01:07, Jason Gunthorpe wrote:
> diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
> index dd980808998da9..1036ed0d899472 100644
> --- a/drivers/iommu/intel/iommu.h
> +++ b/drivers/iommu/intel/iommu.h
> @@ -493,14 +493,13 @@ struct q_inval {
>   
>   /* Page Request Queue depth */
>   #define PRQ_ORDER	4
> -#define PRQ_RING_MASK	((0x1000 << PRQ_ORDER) - 0x20)
> -#define PRQ_DEPTH	((0x1000 << PRQ_ORDER) >> 5)
> +#define PRQ_SIZE	(SZ_4K << PRQ_ORDER)
> +#define PRQ_RING_MASK	(PRQ_SIZE - 0x20)
> +#define PRQ_DEPTH	(PRQ_SIZE >> 5)
>   
>   struct dmar_pci_notify_info;
>   
>   #ifdef CONFIG_IRQ_REMAP
> -/* 1MB - maximum possible interrupt remapping table size */

Can we keep this line of comment,

and move it ...

> -#define INTR_REMAP_PAGE_ORDER	8
>   #define INTR_REMAP_TABLE_REG_SIZE	0xf
>   #define INTR_REMAP_TABLE_REG_SIZE_MASK  0xf
>   
> diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
> index d6b796f8f100cd..735e26498ee9f2 100644
> --- a/drivers/iommu/intel/irq_remapping.c
> +++ b/drivers/iommu/intel/irq_remapping.c
> @@ -538,11 +538,10 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
>   	if (!ir_table)
>   		return -ENOMEM;
>   
> -	ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
> -					       INTR_REMAP_PAGE_ORDER);
> +	ir_table_base =

... here?

> +		iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, SZ_1M);
>   	if (!ir_table_base) {
> -		pr_err("IR%d: failed to allocate pages of order %d\n",
> -		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
> +		pr_err("IR%d: failed to allocate 1M of pages\n", iommu->seq_id);
>   		goto out_free_table;
>   	}

Thanks,
baolu
Jason Gunthorpe Feb. 18, 2025, 8:21 p.m. UTC | #2
On Sat, Feb 15, 2025 at 05:47:51PM +0800, Baolu Lu wrote:
> > --- a/drivers/iommu/intel/irq_remapping.c
> > +++ b/drivers/iommu/intel/irq_remapping.c
> > @@ -538,11 +538,10 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
> >   	if (!ir_table)
> >   		return -ENOMEM;
> > -	ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
> > -					       INTR_REMAP_PAGE_ORDER);
> > +	ir_table_base =
> 
> ... here?

Done

Jason
diff mbox series

Patch

diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index dd980808998da9..1036ed0d899472 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -493,14 +493,13 @@  struct q_inval {
 
 /* Page Request Queue depth */
 #define PRQ_ORDER	4
-#define PRQ_RING_MASK	((0x1000 << PRQ_ORDER) - 0x20)
-#define PRQ_DEPTH	((0x1000 << PRQ_ORDER) >> 5)
+#define PRQ_SIZE	(SZ_4K << PRQ_ORDER)
+#define PRQ_RING_MASK	(PRQ_SIZE - 0x20)
+#define PRQ_DEPTH	(PRQ_SIZE >> 5)
 
 struct dmar_pci_notify_info;
 
 #ifdef CONFIG_IRQ_REMAP
-/* 1MB - maximum possible interrupt remapping table size */
-#define INTR_REMAP_PAGE_ORDER	8
 #define INTR_REMAP_TABLE_REG_SIZE	0xf
 #define INTR_REMAP_TABLE_REG_SIZE_MASK  0xf
 
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
index d6b796f8f100cd..735e26498ee9f2 100644
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -538,11 +538,10 @@  static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 	if (!ir_table)
 		return -ENOMEM;
 
-	ir_table_base = iommu_alloc_pages_node(iommu->node, GFP_KERNEL,
-					       INTR_REMAP_PAGE_ORDER);
+	ir_table_base =
+		iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, SZ_1M);
 	if (!ir_table_base) {
-		pr_err("IR%d: failed to allocate pages of order %d\n",
-		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
+		pr_err("IR%d: failed to allocate 1M of pages\n", iommu->seq_id);
 		goto out_free_table;
 	}
 
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index 2b6e0706d76d62..3afbad4eb46303 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -60,7 +60,8 @@  int intel_pasid_alloc_table(struct device *dev)
 
 	size = max_pasid >> (PASID_PDE_SHIFT - 3);
 	order = size ? get_order(size) : 0;
-	dir = iommu_alloc_pages_node(info->iommu->node, GFP_KERNEL, order);
+	dir = iommu_alloc_pages_node_sz(info->iommu->node, GFP_KERNEL,
+					1 << (order + PAGE_SHIFT));
 	if (!dir) {
 		kfree(pasid_table);
 		return -ENOMEM;
diff --git a/drivers/iommu/intel/prq.c b/drivers/iommu/intel/prq.c
index 01ecafed31453c..0f8c121a8b3f9d 100644
--- a/drivers/iommu/intel/prq.c
+++ b/drivers/iommu/intel/prq.c
@@ -288,7 +288,8 @@  int intel_iommu_enable_prq(struct intel_iommu *iommu)
 	struct iopf_queue *iopfq;
 	int irq, ret;
 
-	iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER);
+	iommu->prq =
+		iommu_alloc_pages_node_sz(iommu->node, GFP_KERNEL, PRQ_SIZE);
 	if (!iommu->prq) {
 		pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
 			iommu->name);
diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
index 7ece83bb0f54bb..b3af2813ed0ced 100644
--- a/drivers/iommu/iommu-pages.h
+++ b/drivers/iommu/iommu-pages.h
@@ -84,22 +84,6 @@  static inline bool iommu_pages_list_empty(struct iommu_pages_list *list)
 	return list_empty(&list->pages);
 }
 
-/**
- * iommu_alloc_pages_node - Allocate a zeroed page of a given order from
- *                          specific NUMA node
- * @nid: memory NUMA node id
- * @gfp: buddy allocator flags
- * @order: page order
- *
- * Returns the virtual address of the allocated page.
- * Prefer to use iommu_alloc_pages_node_lg2()
- */
-static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp,
-					   unsigned int order)
-{
-	return iommu_alloc_pages_node_sz(nid, gfp, 1 << (order + PAGE_SHIFT));
-}
-
 /**
  * iommu_alloc_pages_sz - Allocate a zeroed page of a given size from
  *                          specific NUMA node