| Message ID | 1456856877-4817-7-git-send-email-eric.auger@linaro.org (mailing list archive) |
| --- | --- |
| State | New, archived |
Hi Eric,

On Tue, Mar 01, 2016 at 06:27:46PM +0000, Eric Auger wrote:
> [...]
> +
> +int iommu_get_single_reserved(struct iommu_domain *domain,
> +                              phys_addr_t addr, int prot,
> +                              dma_addr_t *iova)
> +{
> +        unsigned long order = __ffs(domain->ops->pgsize_bitmap);
> +        size_t page_size = 1 << order;
> +        phys_addr_t mask = page_size - 1;
> +        phys_addr_t aligned_addr = addr & ~mask;
> +        phys_addr_t offset = addr - aligned_addr;
> +        struct iommu_reserved_binding *b;
> +        struct iova *p_iova;
> +        struct iova_domain *iovad =
> +                (struct iova_domain *)domain->reserved_iova_cookie;
> +        int ret;
> +
> +        if (!iovad)
> +                return -EINVAL;
> +
> +        mutex_lock(&domain->reserved_mutex);

I believe this function could get called from the chunk of __setup_irq
that is executed atomically:

* request_threaded_irq
  * __setup_irq
    * irq_startup
      * irq_domain_activate_irq
        * msi_domain_activate
          * msi_compose
            * iommu_get_single_reserved

If this is the case, we should probably use a spinlock to protect the
iova_domain...

> +
> +        b = find_reserved_binding(domain, aligned_addr, page_size);
> +        if (b) {
> +                *iova = b->iova + offset;
> +                kref_get(&b->kref);
> +                ret = 0;
> +                goto unlock;
> +        }
> +
> +        /* there is no existing reserved iova for this pa */
> +        p_iova = alloc_iova(iovad, 1, iovad->dma_32bit_pfn, true);
> +        if (!p_iova) {
> +                ret = -ENOMEM;
> +                goto unlock;
> +        }
> +        *iova = p_iova->pfn_lo << order;
> +
> +        b = kzalloc(sizeof(*b), GFP_KERNEL);

... and GFP_ATOMIC here.

Thanks,
Jean-Philippe

> +        if (!b) {
> +                ret = -ENOMEM;
> +                goto free_iova_unlock;
> +        }
> +
> +        ret = iommu_map(domain, *iova, aligned_addr, page_size, prot);
> +        if (ret)
> +                goto free_binding_iova_unlock;
> +
> +        kref_init(&b->kref);
> +        kref_get(&b->kref);
> +        b->domain = domain;
> +        b->addr = aligned_addr;
> +        b->iova = *iova;
> +        b->size = page_size;
> +
> +        link_reserved_binding(domain, b);
> +
> +        *iova += offset;
> +        goto unlock;
> +
> +free_binding_iova_unlock:
> +        kfree(b);
> +free_iova_unlock:
> +        free_iova(iovad, *iova >> order);
> +unlock:
> +        mutex_unlock(&domain->reserved_mutex);
> +        return ret;
> +}
> +EXPORT_SYMBOL_GPL(iommu_get_single_reserved);
Hi Jean-Philippe,

On 03/10/2016 12:52 PM, Jean-Philippe Brucker wrote:
> Hi Eric,
>
> On Tue, Mar 01, 2016 at 06:27:46PM +0000, Eric Auger wrote:
>> [...]
>> +
>> +int iommu_get_single_reserved(struct iommu_domain *domain,
>> +                              phys_addr_t addr, int prot,
>> +                              dma_addr_t *iova)
>> +{
>> +        unsigned long order = __ffs(domain->ops->pgsize_bitmap);
>> +        size_t page_size = 1 << order;
>> +        phys_addr_t mask = page_size - 1;
>> +        phys_addr_t aligned_addr = addr & ~mask;
>> +        phys_addr_t offset = addr - aligned_addr;
>> +        struct iommu_reserved_binding *b;
>> +        struct iova *p_iova;
>> +        struct iova_domain *iovad =
>> +                (struct iova_domain *)domain->reserved_iova_cookie;
>> +        int ret;
>> +
>> +        if (!iovad)
>> +                return -EINVAL;
>> +
>> +        mutex_lock(&domain->reserved_mutex);
>
> I believe this function could get called from the chunk of __setup_irq
> that is executed atomically:
>
> * request_threaded_irq
>   * __setup_irq
>     * irq_startup
>       * irq_domain_activate_irq
>         * msi_domain_activate
>           * msi_compose
>             * iommu_get_single_reserved
>
> If this is the case, we should probably use a spinlock to protect the
> iova_domain...

Apologies for the delay, I was on vacation. Thank you for spotting this
flow. I will rework the locking.

>
>> +
>> +        b = find_reserved_binding(domain, aligned_addr, page_size);
>> +        if (b) {
>> +                *iova = b->iova + offset;
>> +                kref_get(&b->kref);
>> +                ret = 0;
>> +                goto unlock;
>> +        }
>> +
>> +        /* there is no existing reserved iova for this pa */
>> +        p_iova = alloc_iova(iovad, 1, iovad->dma_32bit_pfn, true);
>> +        if (!p_iova) {
>> +                ret = -ENOMEM;
>> +                goto unlock;
>> +        }
>> +        *iova = p_iova->pfn_lo << order;
>> +
>> +        b = kzalloc(sizeof(*b), GFP_KERNEL);
>
> ... and GFP_ATOMIC here.

OK

Thank you for your time!

Best Regards

Eric

>
> Thanks,
> Jean-Philippe
>
>> +        if (!b) {
>> +                ret = -ENOMEM;
>> +                goto free_iova_unlock;
>> +        }
>> +
>> +        ret = iommu_map(domain, *iova, aligned_addr, page_size, prot);
>> +        if (ret)
>> +                goto free_binding_iova_unlock;
>> +
>> +        kref_init(&b->kref);
>> +        kref_get(&b->kref);
>> +        b->domain = domain;
>> +        b->addr = aligned_addr;
>> +        b->iova = *iova;
>> +        b->size = page_size;
>> +
>> +        link_reserved_binding(domain, b);
>> +
>> +        *iova += offset;
>> +        goto unlock;
>> +
>> +free_binding_iova_unlock:
>> +        kfree(b);
>> +free_iova_unlock:
>> +        free_iova(iovad, *iova >> order);
>> +unlock:
>> +        mutex_unlock(&domain->reserved_mutex);
>> +        return ret;
>> +}
>> +EXPORT_SYMBOL_GPL(iommu_get_single_reserved);
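For illustration, here is a minimal sketch of the locking rework discussed above. It is not the actual respin: it assumes the domain's reserved_mutex is replaced by a spinlock (named reserved_lock here, a field that does not exist in the posted series), switches the binding allocation to GFP_ATOMIC as Jean-Philippe suggests, and assumes alloc_iova() and iommu_map() may be called from atomic context for the drivers involved.

```c
/*
 * Sketch only, not the posted patch or its respin.
 * Assumptions: domain->reserved_lock is a spinlock that replaces
 * reserved_mutex, and the path may run from the atomic part of
 * __setup_irq, so no sleeping allocation is allowed.
 */
int iommu_get_single_reserved(struct iommu_domain *domain,
                              phys_addr_t addr, int prot,
                              dma_addr_t *iova)
{
        unsigned long order = __ffs(domain->ops->pgsize_bitmap);
        size_t page_size = 1 << order;
        phys_addr_t mask = page_size - 1;
        phys_addr_t aligned_addr = addr & ~mask;
        phys_addr_t offset = addr - aligned_addr;
        struct iommu_reserved_binding *b;
        struct iova *p_iova;
        struct iova_domain *iovad =
                (struct iova_domain *)domain->reserved_iova_cookie;
        unsigned long flags;
        int ret = 0;

        if (!iovad)
                return -EINVAL;

        /* irqsave variant: the MSI setup path may run with interrupts off */
        spin_lock_irqsave(&domain->reserved_lock, flags);

        b = find_reserved_binding(domain, aligned_addr, page_size);
        if (b) {
                /* page already bound: take a reference, return its iova */
                *iova = b->iova + offset;
                kref_get(&b->kref);
                goto unlock;
        }

        p_iova = alloc_iova(iovad, 1, iovad->dma_32bit_pfn, true);
        if (!p_iova) {
                ret = -ENOMEM;
                goto unlock;
        }
        *iova = p_iova->pfn_lo << order;

        /* GFP_ATOMIC: no sleeping allocation under a spinlock */
        b = kzalloc(sizeof(*b), GFP_ATOMIC);
        if (!b) {
                ret = -ENOMEM;
                goto free_iova_unlock;
        }

        /* assumes the IOMMU driver's map callback is atomic-safe */
        ret = iommu_map(domain, *iova, aligned_addr, page_size, prot);
        if (ret)
                goto free_binding_iova_unlock;

        kref_init(&b->kref);
        kref_get(&b->kref);
        b->domain = domain;
        b->addr = aligned_addr;
        b->iova = *iova;
        b->size = page_size;
        link_reserved_binding(domain, b);

        *iova += offset;
        goto unlock;

free_binding_iova_unlock:
        kfree(b);
free_iova_unlock:
        free_iova(iovad, *iova >> order);
unlock:
        spin_unlock_irqrestore(&domain->reserved_lock, flags);
        return ret;
}
```

The same spinlock would then also have to protect iommu_put_single_reserved() and the kref release path, since the binding list and the iova domain are shared between the two.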
diff --git a/drivers/iommu/dma-reserved-iommu.c b/drivers/iommu/dma-reserved-iommu.c
index 30d54d0..537c83e 100644
--- a/drivers/iommu/dma-reserved-iommu.c
+++ b/drivers/iommu/dma-reserved-iommu.c
@@ -132,3 +132,118 @@ void iommu_free_reserved_iova_domain(struct iommu_domain *domain)
         mutex_unlock(&domain->reserved_mutex);
 }
 EXPORT_SYMBOL_GPL(iommu_free_reserved_iova_domain);
+
+int iommu_get_single_reserved(struct iommu_domain *domain,
+                              phys_addr_t addr, int prot,
+                              dma_addr_t *iova)
+{
+        unsigned long order = __ffs(domain->ops->pgsize_bitmap);
+        size_t page_size = 1 << order;
+        phys_addr_t mask = page_size - 1;
+        phys_addr_t aligned_addr = addr & ~mask;
+        phys_addr_t offset = addr - aligned_addr;
+        struct iommu_reserved_binding *b;
+        struct iova *p_iova;
+        struct iova_domain *iovad =
+                (struct iova_domain *)domain->reserved_iova_cookie;
+        int ret;
+
+        if (!iovad)
+                return -EINVAL;
+
+        mutex_lock(&domain->reserved_mutex);
+
+        b = find_reserved_binding(domain, aligned_addr, page_size);
+        if (b) {
+                *iova = b->iova + offset;
+                kref_get(&b->kref);
+                ret = 0;
+                goto unlock;
+        }
+
+        /* there is no existing reserved iova for this pa */
+        p_iova = alloc_iova(iovad, 1, iovad->dma_32bit_pfn, true);
+        if (!p_iova) {
+                ret = -ENOMEM;
+                goto unlock;
+        }
+        *iova = p_iova->pfn_lo << order;
+
+        b = kzalloc(sizeof(*b), GFP_KERNEL);
+        if (!b) {
+                ret = -ENOMEM;
+                goto free_iova_unlock;
+        }
+
+        ret = iommu_map(domain, *iova, aligned_addr, page_size, prot);
+        if (ret)
+                goto free_binding_iova_unlock;
+
+        kref_init(&b->kref);
+        kref_get(&b->kref);
+        b->domain = domain;
+        b->addr = aligned_addr;
+        b->iova = *iova;
+        b->size = page_size;
+
+        link_reserved_binding(domain, b);
+
+        *iova += offset;
+        goto unlock;
+
+free_binding_iova_unlock:
+        kfree(b);
+free_iova_unlock:
+        free_iova(iovad, *iova >> order);
+unlock:
+        mutex_unlock(&domain->reserved_mutex);
+        return ret;
+}
+EXPORT_SYMBOL_GPL(iommu_get_single_reserved);
+
+/* called with reserved_mutex locked */
+static void reserved_binding_release(struct kref *kref)
+{
+        struct iommu_reserved_binding *b =
+                container_of(kref, struct iommu_reserved_binding, kref);
+        struct iommu_domain *d = b->domain;
+        struct iova_domain *iovad =
+                (struct iova_domain *)d->reserved_iova_cookie;
+        unsigned long order = __ffs(b->size);
+
+        iommu_unmap(d, b->iova, b->size);
+        free_iova(iovad, b->iova >> order);
+        unlink_reserved_binding(d, b);
+        kfree(b);
+}
+
+void iommu_put_single_reserved(struct iommu_domain *domain, dma_addr_t iova)
+{
+        unsigned long order;
+        phys_addr_t aligned_addr;
+        dma_addr_t aligned_iova, page_size, mask, offset;
+        struct iommu_reserved_binding *b;
+
+        order = __ffs(domain->ops->pgsize_bitmap);
+        page_size = (uint64_t)1 << order;
+        mask = page_size - 1;
+
+        aligned_iova = iova & ~mask;
+        offset = iova - aligned_iova;
+
+        aligned_addr = iommu_iova_to_phys(domain, aligned_iova);
+
+        mutex_lock(&domain->reserved_mutex);
+
+        b = find_reserved_binding(domain, aligned_addr, page_size);
+        if (!b)
+                goto unlock;
+        kref_put(&b->kref, reserved_binding_release);
+
+unlock:
+        mutex_unlock(&domain->reserved_mutex);
+}
+EXPORT_SYMBOL_GPL(iommu_put_single_reserved);
+
+
+
diff --git a/include/linux/dma-reserved-iommu.h b/include/linux/dma-reserved-iommu.h
index 5bf863b..71ec800 100644
--- a/include/linux/dma-reserved-iommu.h
+++ b/include/linux/dma-reserved-iommu.h
@@ -40,6 +40,32 @@ int iommu_alloc_reserved_iova_domain(struct iommu_domain *domain,
  */
 void iommu_free_reserved_iova_domain(struct iommu_domain *domain);
 
+/**
+ * iommu_get_single_reserved: allocate a reserved iova page and bind
+ * it onto the page that contains a physical address (@addr)
+ *
+ * @domain: iommu domain handle
+ * @addr: physical address to bind
+ * @prot: mapping protection attribute
+ * @iova: returned iova
+ *
+ * In case the 2 pages already are bound simply return @iova and
+ * increment a ref count
+ */
+int iommu_get_single_reserved(struct iommu_domain *domain,
+                              phys_addr_t addr, int prot,
+                              dma_addr_t *iova);
+
+/**
+ * iommu_put_single_reserved: decrement a ref count of the iova page
+ *
+ * @domain: iommu domain handle
+ * @iova: iova whose binding ref count is decremented
+ *
+ * if the binding ref count is null, unmap the iova page and release the iova
+ */
+void iommu_put_single_reserved(struct iommu_domain *domain, dma_addr_t iova);
+
 #endif        /* CONFIG_IOMMU_DMA_RESERVED */
 #endif        /* __KERNEL__ */
 #endif        /* __DMA_RESERVED_IOMMU_H */
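As a usage illustration only (not taken from this series): a hypothetical MSI composition path could bind the doorbell physical address with iommu_get_single_reserved(), program the returned IOVA into the MSI message, and drop the binding again on teardown. The function names, the doorbell_pa parameter and the protection flag chosen below are assumptions made for the sketch.

```c
/*
 * Hypothetical caller sketch. Everything except the
 * iommu_get_single_reserved()/iommu_put_single_reserved() pair and the
 * core kernel helpers is made up for illustration.
 */
#include <linux/iommu.h>
#include <linux/msi.h>
#include <linux/dma-reserved-iommu.h>

static int example_compose_msi(struct iommu_domain *domain,
                               phys_addr_t doorbell_pa, struct msi_msg *msg)
{
        dma_addr_t iova;
        int ret;

        /* bind the page containing the doorbell; the device only writes to it */
        ret = iommu_get_single_reserved(domain, doorbell_pa, IOMMU_WRITE, &iova);
        if (ret)
                return ret;

        /* program the device-visible (IOVA) doorbell address into the message */
        msg->address_lo = lower_32_bits(iova);
        msg->address_hi = upper_32_bits(iova);
        return 0;
}

static void example_teardown_msi(struct iommu_domain *domain, struct msi_msg *msg)
{
        dma_addr_t iova = ((dma_addr_t)msg->address_hi << 32) | msg->address_lo;

        /* drop our reference; the mapping is unmapped and the iova freed
         * only once the last user calls put (kref reaches zero) */
        iommu_put_single_reserved(domain, iova);
}
```

Several interrupts targeting the same doorbell page would share a single binding through the kref, which is the point of the get/put pair in the patch above.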