@@ -419,9 +419,10 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
size >> iova_shift(iovad));
}

-static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
+static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
size_t size)
{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iova_domain *iovad = &cookie->iovad;
size_t iova_off = iova_offset(iovad, dma_addr);
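For context, here is how the helper should read once this hunk applies. The tail of the function falls outside the diff context, so the alignment and unmap/free calls below are reconstructed from the surrounding dma-iommu code and should be treated as a sketch rather than the verbatim result:

    static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
                    size_t size)
    {
            struct iommu_domain *domain = iommu_get_dma_domain(dev);
            struct iommu_dma_cookie *cookie = domain->iova_cookie;
            struct iova_domain *iovad = &cookie->iovad;
            size_t iova_off = iova_offset(iovad, dma_addr);

            /* Round out to full IOVA granules (assumed tail, not in the hunk) */
            dma_addr -= iova_off;
            size = iova_align(iovad, size + iova_off);

            WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
            iommu_dma_free_iova(cookie, dma_addr, size);
    }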
@@ -436,8 +437,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
- size_t size, int prot, struct iommu_domain *domain)
+ size_t size, int prot)
{
+ struct iommu_domain *domain = iommu_get_dma_domain(dev);
struct iommu_dma_cookie *cookie = domain->iova_cookie;
size_t iova_off = 0;
dma_addr_t iova;
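Both hunks above make the same refactor: instead of every caller looking up the IOMMU domain with iommu_get_dma_domain() and threading it through as an argument, the two low-level helpers now derive it from the struct device themselves. A minimal, self-contained analogue of the pattern, using hypothetical names (get_context, op_old, op_new) rather than anything from the kernel:

    #include <stdio.h>

    struct context { int id; };
    struct device { struct context ctx; };

    /* Hypothetical stand-in for iommu_get_dma_domain(dev) */
    static struct context *get_context(struct device *dev)
    {
            return &dev->ctx;
    }

    /* Before: callers had to look up the context and pass it in. */
    static void op_old(struct device *dev, struct context *ctx)
    {
            (void)dev;
            printf("op on context %d\n", ctx->id);
    }

    /* After: the callee derives the context from the device itself. */
    static void op_new(struct device *dev)
    {
            struct context *ctx = get_context(dev);
            printf("op on context %d\n", ctx->id);
    }

    int main(void)
    {
            struct device dev = { .ctx = { .id = 42 } };
            op_old(&dev, get_context(&dev)); /* lookup repeated at each call site */
            op_new(&dev);                    /* lookup centralized in the callee */
            return 0;
    }

The payoff shows up in every hunk that follows: the iommu_get_dma_domain(dev) argument disappears from the call sites, and the device pointer the callers already hold is enough.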
@@ -536,7 +538,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
static void __iommu_dma_free(struct device *dev, struct page **pages,
size_t size, dma_addr_t *handle)
{
- __iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
+ __iommu_dma_unmap(dev, *handle, size);
__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
*handle = DMA_MAPPING_ERROR;
}
@@ -699,14 +701,13 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
static dma_addr_t __iommu_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, int prot)
{
- return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
- iommu_get_dma_domain(dev));
+ return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

static void __iommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
+ __iommu_dma_unmap(dev, handle, size);
}

static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -715,11 +716,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
{
phys_addr_t phys = page_to_phys(page) + offset;
bool coherent = dev_is_dma_coherent(dev);
+ int prot = dma_info_to_prot(dir, coherent, attrs);
dma_addr_t dma_handle;

- dma_handle =__iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, coherent, attrs),
- iommu_get_dma_domain(dev));
+ dma_handle = __iommu_dma_map(dev, phys, size, prot);
if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
dma_handle != DMA_MAPPING_ERROR)
arch_sync_dma_for_device(dev, phys, size, dir);
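Pieced together from this hunk, iommu_dma_map_page() now reads roughly as follows; the final return statement sits outside the diff context and is an assumption:

    static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                    unsigned long offset, size_t size, enum dma_data_direction dir,
                    unsigned long attrs)
    {
            phys_addr_t phys = page_to_phys(page) + offset;
            bool coherent = dev_is_dma_coherent(dev);
            int prot = dma_info_to_prot(dir, coherent, attrs);
            dma_addr_t dma_handle;

            dma_handle = __iommu_dma_map(dev, phys, size, prot);
            /* Non-coherent devices need an explicit writeback before DMA */
            if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
                dma_handle != DMA_MAPPING_ERROR)
                    arch_sync_dma_for_device(dev, phys, size, dir);
            return dma_handle; /* assumed, not shown in the hunk */
    }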
@@ -731,7 +731,7 @@ static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
{
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
- __iommu_dma_unmap(iommu_get_dma_domain(dev), dma_handle, size);
+ __iommu_dma_unmap(dev, dma_handle, size);
}

/*
@@ -912,21 +912,20 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
sg = tmp;
}
end = sg_dma_address(sg) + sg_dma_len(sg);
- __iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
+ __iommu_dma_unmap(dev, start, end - start);
}

static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
return __iommu_dma_map(dev, phys, size,
- dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
- iommu_get_dma_domain(dev));
+ dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
- __iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
+ __iommu_dma_unmap(dev, handle, size);
}

static void *iommu_dma_alloc(struct device *dev, size_t size,
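The prot value these call sites compute comes from dma_info_to_prot(), which translates a DMA API direction plus attrs into IOMMU permission flags (the resource paths additionally OR in IOMMU_MMIO for device memory). For reference, a sketch of that helper as it looked in this era of dma-iommu.c, reproduced from memory, so the details should be double-checked against the tree:

    static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                    unsigned long attrs)
    {
            int prot = coherent ? IOMMU_CACHE : 0;

            if (attrs & DMA_ATTR_PRIVILEGED)
                    prot |= IOMMU_PRIV;

            switch (dir) {
            case DMA_BIDIRECTIONAL:
                    return prot | IOMMU_READ | IOMMU_WRITE;
            case DMA_TO_DEVICE:
                    return prot | IOMMU_READ;
            case DMA_FROM_DEVICE:
                    return prot | IOMMU_WRITE;
            default:
                    return 0;
            }
    }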
@@ -1176,9 +1175,8 @@ void iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
- phys_addr_t msi_addr, struct iommu_domain *domain)
+ phys_addr_t msi_addr, struct iommu_dma_cookie *cookie)
{
- struct iommu_dma_cookie *cookie = domain->iova_cookie;
struct iommu_dma_msi_page *msi_page;
dma_addr_t iova;
int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
@@ -1193,7 +1191,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
msi_page = kzalloc(sizeof(*msi_page), GFP_KERNEL);
if (!msi_page)
return NULL;

- iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
+ iova = __iommu_dma_map(dev, msi_addr, size, prot);
if (iova == DMA_MAPPING_ERROR)
goto out_free_page;
@@ -1228,7 +1226,7 @@ void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
* of an MSI from within an IPI handler.
*/
spin_lock_irqsave(&cookie->msi_lock, flags);
- msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
+ msi_page = iommu_dma_get_msi_page(dev, msi_addr, cookie);
spin_unlock_irqrestore(&cookie->msi_lock, flags);

if (WARN_ON(!msi_page)) {
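Since iommu_dma_get_msi_page() now takes the cookie rather than the domain, the caller resolves it once before taking the lock. A sketch of the surrounding code in iommu_dma_map_msi_msg(), with the lines outside the hunk reconstructed from memory and therefore only indicative:

    struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
    struct iommu_dma_cookie *cookie;
    struct iommu_dma_msi_page *msi_page;
    unsigned long flags;

    /* Assumed guard: no IOVA cookie means MSIs are not translated */
    if (!domain || !domain->iova_cookie)
            return;
    cookie = domain->iova_cookie;

    /* msi_lock is taken IRQ-safe, per the comment in the hunk above */
    spin_lock_irqsave(&cookie->msi_lock, flags);
    msi_page = iommu_dma_get_msi_page(dev, msi_addr, cookie);
    spin_unlock_irqrestore(&cookie->msi_lock, flags);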