@@ -262,7 +262,7 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
while (len) {
size_t pgsize = get_pgsize(iova | paddr, len);
- ops->map(ops, iova, paddr, pgsize, prot);
+ ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
iova += pgsize;
paddr += pgsize;
len -= pgsize;
@@ -2845,7 +2845,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
if (!ops)
return -ENODEV;
- return ops->map(ops, iova, paddr, size, prot);
+ return ops->map(ops, iova, paddr, size, prot, gfp);
}
static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
@@ -1237,7 +1237,7 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
arm_smmu_rpm_get(smmu);
- ret = ops->map(ops, iova, paddr, size, prot);
+ ret = ops->map(ops, iova, paddr, size, prot, gfp);
arm_smmu_rpm_put(smmu);
return ret;
@@ -470,7 +470,7 @@ static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
phys_addr_t paddr, size_t size, int prot,
- int lvl, arm_v7s_iopte *ptep)
+ int lvl, arm_v7s_iopte *ptep, gfp_t gfp)
{
struct io_pgtable_cfg *cfg = &data->iop.cfg;
arm_v7s_iopte pte, *cptep;
@@ -491,7 +491,7 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
/* Grab a pointer to the next level */
pte = READ_ONCE(*ptep);
if (!pte) {
- cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data);
+ cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data);
if (!cptep)
return -ENOMEM;
@@ -512,11 +512,11 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
}
/* Rinse, repeat */
- return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+ return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}
static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot)
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable *iop = &data->iop;
@@ -530,7 +530,7 @@ static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
paddr >= (1ULL << data->iop.cfg.oas)))
return -ERANGE;
- ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd);
+ ret = __arm_v7s_map(data, iova, paddr, size, prot, 1, data->pgd, gfp);
/*
* Synchronise all PTE updates for the new mapping before there's
* a chance for anything to kick off a table walk for the new iova.
@@ -355,7 +355,7 @@ static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
- int lvl, arm_lpae_iopte *ptep)
+ int lvl, arm_lpae_iopte *ptep, gfp_t gfp)
{
arm_lpae_iopte *cptep, pte;
size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
@@ -376,7 +376,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
/* Grab a pointer to the next level */
pte = READ_ONCE(*ptep);
if (!pte) {
- cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
+ cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg);
if (!cptep)
return -ENOMEM;
@@ -396,7 +396,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
}
/* Rinse, repeat */
- return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
@@ -461,7 +461,7 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
}
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int iommu_prot)
+ phys_addr_t paddr, size_t size, int iommu_prot, gfp_t gfp)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
@@ -483,7 +483,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
return -ERANGE;
prot = arm_lpae_prot_to_pte(data, iommu_prot);
- ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+ ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep, gfp);
/*
* Synchronise all PTE updates for the new mapping before there's
* a chance for anything to kick off a table walk for the new iova.
@@ -687,7 +687,7 @@ static int ipmmu_map(struct iommu_domain *io_domain, unsigned long iova,
if (!domain)
return -ENODEV;
- return domain->iop->map(domain->iop, iova, paddr, size, prot);
+ return domain->iop->map(domain->iop, iova, paddr, size, prot, gfp);
}
static size_t ipmmu_unmap(struct iommu_domain *io_domain, unsigned long iova,
@@ -511,7 +511,7 @@ static int msm_iommu_map(struct iommu_domain *domain, unsigned long iova,
int ret;
spin_lock_irqsave(&priv->pgtlock, flags);
- ret = priv->iop->map(priv->iop, iova, pa, len, prot);
+ ret = priv->iop->map(priv->iop, iova, pa, len, prot, GFP_ATOMIC);
spin_unlock_irqrestore(&priv->pgtlock, flags);
return ret;
@@ -397,7 +397,7 @@ static int mtk_iommu_map(struct iommu_domain *domain, unsigned long iova,
paddr |= BIT_ULL(32);
/* Synchronize with the tlb_lock */
- return dom->iop->map(dom->iop, iova, paddr, size, prot);
+ return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
}
static size_t mtk_iommu_unmap(struct iommu_domain *domain,
@@ -441,7 +441,7 @@ static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
spin_lock_irqsave(&qcom_domain->pgtbl_lock, flags);
- ret = ops->map(ops, iova, paddr, size, prot);
+ ret = ops->map(ops, iova, paddr, size, prot, GFP_ATOMIC);
spin_unlock_irqrestore(&qcom_domain->pgtbl_lock, flags);
return ret;
}
@@ -155,7 +155,7 @@ struct io_pgtable_cfg {
*/
struct io_pgtable_ops {
int (*map)(struct io_pgtable_ops *ops, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova,
size_t size, struct iommu_iotlb_gather *gather);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
Currently the ARM page tables are always allocated with GFP_ATOMIC, but commit 781ca2de89ba ("iommu: Add gfp parameter to iommu_ops::map") added a gfp_t parameter to iommu_ops->map(). Thus io_pgtable_ops->map() should use the gfp value passed down from iommu_ops->map() when allocating page-table pages, which avoids draining the memory allocator's atomic pools from non-atomic contexts.

Signed-off-by: Baolin Wang <baolin.wang@linux.alibaba.com>
---
 drivers/gpu/drm/panfrost/panfrost_mmu.c |  2 +-
 drivers/iommu/arm-smmu-v3.c             |  2 +-
 drivers/iommu/arm-smmu.c                |  2 +-
 drivers/iommu/io-pgtable-arm-v7s.c      | 10 +++++-----
 drivers/iommu/io-pgtable-arm.c          | 10 +++++-----
 drivers/iommu/ipmmu-vmsa.c              |  2 +-
 drivers/iommu/msm_iommu.c               |  2 +-
 drivers/iommu/mtk_iommu.c               |  2 +-
 drivers/iommu/qcom_iommu.c              |  2 +-
 include/linux/io-pgtable.h              |  2 +-
 10 files changed, 18 insertions(+), 18 deletions(-)
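
The conversion is mostly mechanical, but the resulting calling convention is worth spelling out: a driver's ->map() callback simply forwards the gfp it receives from the IOMMU core, while callers that map under a spinlock (msm_iommu_map(), qcom_iommu_map()) keep passing GFP_ATOMIC explicitly. Below is a minimal sketch of both cases; example_iommu_map(), example_map_atomic(), struct example_domain and to_example_domain() are hypothetical names used only for illustration and are not part of this patch.

static int example_iommu_map(struct iommu_domain *domain, unsigned long iova,
			     phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	struct example_domain *dom = to_example_domain(domain);

	/*
	 * Sleepable context: forward the core's gfp (typically GFP_KERNEL)
	 * so io-pgtable can allocate table pages without dipping into the
	 * atomic pools.
	 */
	return dom->iop->map(dom->iop, iova, paddr, size, prot, gfp);
}

static int example_map_atomic(struct example_domain *dom, unsigned long iova,
			      phys_addr_t paddr, size_t size, int prot)
{
	unsigned long flags;
	int ret;

	/*
	 * Atomic context: a spinlock is held and sleeping is not allowed,
	 * so GFP_ATOMIC is still passed explicitly, as msm_iommu_map() and
	 * qcom_iommu_map() do above.
	 */
	spin_lock_irqsave(&dom->pgtlock, flags);
	ret = dom->iop->map(dom->iop, iova, paddr, size, prot, GFP_ATOMIC);
	spin_unlock_irqrestore(&dom->pgtlock, flags);

	return ret;
}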