Context                               | Check   | Description
--------------------------------------+---------+---------------------------------
bjorn/pre-ci_am                       | success | Success
bjorn/build-rv32-defconfig            | success | build-rv32-defconfig
bjorn/build-rv64-clang-allmodconfig   | success | build-rv64-clang-allmodconfig
bjorn/build-rv64-gcc-allmodconfig     | success | build-rv64-gcc-allmodconfig
bjorn/build-rv64-nommu-k210-defconfig | success | build-rv64-nommu-k210-defconfig
bjorn/build-rv64-nommu-k210-virt      | success | build-rv64-nommu-k210-virt
bjorn/checkpatch                      | success | checkpatch
bjorn/dtb-warn-rv64                   | success | dtb-warn-rv64
bjorn/header-inline                   | success | header-inline
bjorn/kdoc                            | success | kdoc
bjorn/module-param                    | success | module-param
bjorn/verify-fixes                    | success | verify-fixes
bjorn/verify-signedoff                | success | verify-signedoff
If all the inlines are unwound virt_to_dma_pfn() is simply:

	return page_to_pfn(virt_to_page(p)) << (PAGE_SHIFT - VTD_PAGE_SHIFT);

Which can be re-arranged to:

	(page_to_pfn(virt_to_page(p)) << PAGE_SHIFT) >> VTD_PAGE_SHIFT

The only caller is:

	((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT)

re-arranged to:

	((page_to_pfn(virt_to_page(tmp_page)) << PAGE_SHIFT) >> VTD_PAGE_SHIFT) << VTD_PAGE_SHIFT

Which simplifies to:

	page_to_pfn(virt_to_page(tmp_page)) << PAGE_SHIFT

That is the same as virt_to_phys(tmp_page), so just remove all of this.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/intel/iommu.c |  3 ++-
 drivers/iommu/intel/iommu.h | 19 -------------------
 2 files changed, 2 insertions(+), 20 deletions(-)

--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -737,7 +737,8 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 				return NULL;
 
 			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
-			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+			pteval = virt_to_phys(tmp_page) | DMA_PTE_READ |
+				 DMA_PTE_WRITE;
 			if (domain->use_first_level)
 				pteval |= DMA_FL_PTE_US | DMA_FL_PTE_ACCESS;
 
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -953,25 +953,6 @@ static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
 	return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
 }
 
-/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
-   are never going to work. */
-static inline unsigned long mm_to_dma_pfn_start(unsigned long mm_pfn)
-{
-	return mm_pfn << (PAGE_SHIFT - VTD_PAGE_SHIFT);
-}
-static inline unsigned long mm_to_dma_pfn_end(unsigned long mm_pfn)
-{
-	return ((mm_pfn + 1) << (PAGE_SHIFT - VTD_PAGE_SHIFT)) - 1;
-}
-static inline unsigned long page_to_dma_pfn(struct page *pg)
-{
-	return mm_to_dma_pfn_start(page_to_pfn(pg));
-}
-static inline unsigned long virt_to_dma_pfn(void *p)
-{
-	return page_to_dma_pfn(virt_to_page(p));
-}
-
 static inline void context_set_present(struct context_entry *context)
 {
 	context->lo |= 1;
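[Editorial aside, not part of the patch: a minimal user-space sketch of the arithmetic in the changelog, using stand-in constants and a made-up page-aligned physical address so the old and new expressions can be compared directly. The PAGE_SHIFT/VTD_PAGE_SHIFT values and helper names below are assumptions for this sketch, not the kernel's definitions.]

/*
 * Sketch only -- not part of the patch.  Mimics the unwound
 * virt_to_dma_pfn() arithmetic with stand-in constants to show that the
 * old PTE address computation equals the physical address of the page,
 * i.e. what virt_to_phys() returns.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	16	/* assumed MM page size (64K), for illustration */
#define VTD_PAGE_SHIFT	12	/* VT-d page-table pages are 4K */

/* stand-in for page_to_pfn(virt_to_page(p)) on a page-aligned address */
static uint64_t mm_pfn(uint64_t phys)
{
	return phys >> PAGE_SHIFT;
}

/* the removed helper chain, unwound as in the changelog */
static uint64_t virt_to_dma_pfn_unwound(uint64_t phys)
{
	return mm_pfn(phys) << (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
	/* a made-up, page-aligned physical address of a page-table page */
	uint64_t phys = 0x123ULL << PAGE_SHIFT;

	/* old: ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) */
	uint64_t old_addr = virt_to_dma_pfn_unwound(phys) << VTD_PAGE_SHIFT;

	/* new: virt_to_phys(tmp_page), i.e. the physical address itself */
	uint64_t new_addr = phys;

	assert(old_addr == new_addr);
	printf("old=%#llx new=%#llx\n",
	       (unsigned long long)old_addr, (unsigned long long)new_addr);
	return 0;
}

With VTD_PAGE_SHIFT <= PAGE_SHIFT (the property the removed comment insists on) and a page-aligned input, the >> VTD_PAGE_SHIFT / << VTD_PAGE_SHIFT pair only clears low bits that are already zero, so the whole expression reduces to page_to_pfn(virt_to_page(p)) << PAGE_SHIFT, i.e. virt_to_phys(p), exactly as the changelog argues.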