Message ID | 20231115030226.16700-10-baolu.lu@linux.intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | iommu: Prepare to deliver page faults to user space | expand |
On Wed, Nov 15, 2023 at 11:02:23AM +0800, Lu Baolu wrote: > Make iommu_queue_iopf() more generic by making the iopf_group a minimal > set of iopf's that an iopf handler of domain should handle and respond > to. Add domain parameter to struct iopf_group so that the handler can > retrieve and use it directly. > > Change iommu_queue_iopf() to forward groups of iopf's to the domain's > iopf handler. This is also a necessary step to decouple the sva iopf > handling code from this interface. > > Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com> > Reviewed-by: Kevin Tian <kevin.tian@intel.com> > Tested-by: Yan Zhao <yan.y.zhao@intel.com> > --- > include/linux/iommu.h | 4 +-- > drivers/iommu/iommu-sva.h | 6 ++--- > drivers/iommu/io-pgfault.c | 55 +++++++++++++++++++++++++++++--------- > drivers/iommu/iommu-sva.c | 3 +-- > 4 files changed, 48 insertions(+), 20 deletions(-) Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> Jason
On 2023/11/15 11:02, Lu Baolu wrote: > Make iommu_queue_iopf() more generic by making the iopf_group a minimal > set of iopf's that an iopf handler of domain should handle and respond > to. Add domain parameter to struct iopf_group so that the handler can > retrieve and use it directly. > > Change iommu_queue_iopf() to forward groups of iopf's to the domain's > iopf handler. This is also a necessary step to decouple the sva iopf > handling code from this interface. > > Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com> > Reviewed-by: Kevin Tian <kevin.tian@intel.com> > Tested-by: Yan Zhao <yan.y.zhao@intel.com> > --- > include/linux/iommu.h | 4 +-- > drivers/iommu/iommu-sva.h | 6 ++--- > drivers/iommu/io-pgfault.c | 55 +++++++++++++++++++++++++++++--------- > drivers/iommu/iommu-sva.c | 3 +-- > 4 files changed, 48 insertions(+), 20 deletions(-) > > diff --git a/include/linux/iommu.h b/include/linux/iommu.h > index 0d3c5a56b078..96f6f210093e 100644 > --- a/include/linux/iommu.h > +++ b/include/linux/iommu.h > @@ -130,6 +130,7 @@ struct iopf_group { > struct list_head faults; > struct work_struct work; > struct device *dev; > + struct iommu_domain *domain; > }; > > /** > @@ -209,8 +210,7 @@ struct iommu_domain { > unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ > struct iommu_domain_geometry geometry; > struct iommu_dma_cookie *iova_cookie; > - enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault, > - void *data); > + int (*iopf_handler)(struct iopf_group *group); > void *fault_data; > union { > struct { > diff --git a/drivers/iommu/iommu-sva.h b/drivers/iommu/iommu-sva.h > index de7819c796ce..27c8da115b41 100644 > --- a/drivers/iommu/iommu-sva.h > +++ b/drivers/iommu/iommu-sva.h > @@ -22,8 +22,7 @@ int iopf_queue_flush_dev(struct device *dev); > struct iopf_queue *iopf_queue_alloc(const char *name); > void iopf_queue_free(struct iopf_queue *queue); > int iopf_queue_discard_partial(struct iopf_queue *queue); > -enum 
iommu_page_response_code > -iommu_sva_handle_iopf(struct iommu_fault *fault, void *data); > +int iommu_sva_handle_iopf(struct iopf_group *group); > > #else /* CONFIG_IOMMU_SVA */ > static inline int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev) > @@ -62,8 +61,7 @@ static inline int iopf_queue_discard_partial(struct iopf_queue *queue) > return -ENODEV; > } > > -static inline enum iommu_page_response_code > -iommu_sva_handle_iopf(struct iommu_fault *fault, void *data) > +static inline int iommu_sva_handle_iopf(struct iopf_group *group) > { > return IOMMU_PAGE_RESP_INVALID; > } > diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c > index 09e05f483b4f..544653de0d45 100644 > --- a/drivers/iommu/io-pgfault.c > +++ b/drivers/iommu/io-pgfault.c > @@ -13,6 +13,9 @@ > > #include "iommu-sva.h" > > +enum iommu_page_response_code > +iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm); > + > static void iopf_free_group(struct iopf_group *group) > { > struct iopf_fault *iopf, *next; > @@ -45,23 +48,18 @@ static void iopf_handler(struct work_struct *work) > { > struct iopf_fault *iopf; > struct iopf_group *group; > - struct iommu_domain *domain; > enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS; > > group = container_of(work, struct iopf_group, work); > - domain = iommu_get_domain_for_dev_pasid(group->dev, > - group->last_fault.fault.prm.pasid, 0); > - if (!domain || !domain->iopf_handler) > - status = IOMMU_PAGE_RESP_INVALID; > - > list_for_each_entry(iopf, &group->faults, list) { > /* > * For the moment, errors are sticky: don't handle subsequent > * faults in the group if there is an error. 
> */ > - if (status == IOMMU_PAGE_RESP_SUCCESS) > - status = domain->iopf_handler(&iopf->fault, > - domain->fault_data); > + if (status != IOMMU_PAGE_RESP_SUCCESS) > + break; > + > + status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm); > } > > iopf_complete_group(group->dev, &group->last_fault, status); > @@ -113,6 +111,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev) > int ret; > struct iopf_group *group; > struct iopf_fault *iopf, *next; > + struct iommu_domain *domain = NULL; > struct iommu_fault_param *iopf_param; > struct dev_iommu *param = dev->iommu; > > @@ -143,6 +142,23 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev) > return 0; > } > > + if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) { > + domain = iommu_get_domain_for_dev_pasid(dev, fault->prm.pasid, 0); > + if (IS_ERR(domain)) > + domain = NULL; > + } > + > + if (!domain) > + domain = iommu_get_domain_for_dev(dev); > + > + if (!domain || !domain->iopf_handler) { > + dev_warn_ratelimited(dev, > + "iopf (pasid %d) without domain attached or handler installed\n", > + fault->prm.pasid); > + ret = -ENODEV; > + goto cleanup_partial; > + } > + > group = kzalloc(sizeof(*group), GFP_KERNEL); > if (!group) { > /* > @@ -157,8 +173,8 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev) > group->dev = dev; > group->last_fault.fault = *fault; > INIT_LIST_HEAD(&group->faults); > + group->domain = domain; > list_add(&group->last_fault.list, &group->faults); > - INIT_WORK(&group->work, iopf_handler); > > /* See if we have partial faults for this group */ > list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { > @@ -167,9 +183,13 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev) > list_move(&iopf->list, &group->faults); > } > > - queue_work(iopf_param->queue->wq, &group->work); > - return 0; > + mutex_unlock(&iopf_param->lock); > + ret = domain->iopf_handler(group); > + mutex_lock(&iopf_param->lock); 
After this change, this function (iommu_queue_iopf) does not queue anything. Should this function be renamed? Except this, I didn't see other problem. Reviewed-by: Yi Liu <yi.l.liu@intel.com> > + if (ret) > + iopf_free_group(group); > > + return ret; > cleanup_partial: > list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { > if (iopf->fault.prm.grpid == fault->prm.grpid) { > @@ -181,6 +201,17 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev) > } > EXPORT_SYMBOL_GPL(iommu_queue_iopf); > > +int iommu_sva_handle_iopf(struct iopf_group *group) > +{ > + struct iommu_fault_param *fault_param = group->dev->iommu->fault_param; > + > + INIT_WORK(&group->work, iopf_handler); > + if (!queue_work(fault_param->queue->wq, &group->work)) > + return -EBUSY; > + > + return 0; > +} > + > /** > * iopf_queue_flush_dev - Ensure that all queued faults have been processed > * @dev: the endpoint whose faults need to be flushed. > diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c > index b78671a8a914..ba0d5b7e106a 100644 > --- a/drivers/iommu/iommu-sva.c > +++ b/drivers/iommu/iommu-sva.c > @@ -149,11 +149,10 @@ EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); > * I/O page fault handler for SVA > */ > enum iommu_page_response_code > -iommu_sva_handle_iopf(struct iommu_fault *fault, void *data) > +iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm) > { > vm_fault_t ret; > struct vm_area_struct *vma; > - struct mm_struct *mm = data; > unsigned int access_flags = 0; > unsigned int fault_flags = FAULT_FLAG_REMOTE; > struct iommu_fault_page_request *prm = &fault->prm;
On 2023/12/5 15:13, Yi Liu wrote: >> @@ -157,8 +173,8 @@ int iommu_queue_iopf(struct iommu_fault *fault, >> struct device *dev) >> group->dev = dev; >> group->last_fault.fault = *fault; >> INIT_LIST_HEAD(&group->faults); >> + group->domain = domain; >> list_add(&group->last_fault.list, &group->faults); >> - INIT_WORK(&group->work, iopf_handler); >> /* See if we have partial faults for this group */ >> list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { >> @@ -167,9 +183,13 @@ int iommu_queue_iopf(struct iommu_fault *fault, >> struct device *dev) >> list_move(&iopf->list, &group->faults); >> } >> - queue_work(iopf_param->queue->wq, &group->work); >> - return 0; >> + mutex_unlock(&iopf_param->lock); >> + ret = domain->iopf_handler(group); >> + mutex_lock(&iopf_param->lock); > > After this change, this function (iommu_queue_iopf) does not queue > anything. Should this function be renamed? Except this, I didn't see > other problem. It's renamed in the next patch. > > Reviewed-by: Yi Liu <yi.l.liu@intel.com> Thank you! Best regards, baolu
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 0d3c5a56b078..96f6f210093e 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -130,6 +130,7 @@ struct iopf_group { struct list_head faults; struct work_struct work; struct device *dev; + struct iommu_domain *domain; }; /** @@ -209,8 +210,7 @@ struct iommu_domain { unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ struct iommu_domain_geometry geometry; struct iommu_dma_cookie *iova_cookie; - enum iommu_page_response_code (*iopf_handler)(struct iommu_fault *fault, - void *data); + int (*iopf_handler)(struct iopf_group *group); void *fault_data; union { struct { diff --git a/drivers/iommu/iommu-sva.h b/drivers/iommu/iommu-sva.h index de7819c796ce..27c8da115b41 100644 --- a/drivers/iommu/iommu-sva.h +++ b/drivers/iommu/iommu-sva.h @@ -22,8 +22,7 @@ int iopf_queue_flush_dev(struct device *dev); struct iopf_queue *iopf_queue_alloc(const char *name); void iopf_queue_free(struct iopf_queue *queue); int iopf_queue_discard_partial(struct iopf_queue *queue); -enum iommu_page_response_code -iommu_sva_handle_iopf(struct iommu_fault *fault, void *data); +int iommu_sva_handle_iopf(struct iopf_group *group); #else /* CONFIG_IOMMU_SVA */ static inline int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev) @@ -62,8 +61,7 @@ static inline int iopf_queue_discard_partial(struct iopf_queue *queue) return -ENODEV; } -static inline enum iommu_page_response_code -iommu_sva_handle_iopf(struct iommu_fault *fault, void *data) +static inline int iommu_sva_handle_iopf(struct iopf_group *group) { return IOMMU_PAGE_RESP_INVALID; } diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c index 09e05f483b4f..544653de0d45 100644 --- a/drivers/iommu/io-pgfault.c +++ b/drivers/iommu/io-pgfault.c @@ -13,6 +13,9 @@ #include "iommu-sva.h" +enum iommu_page_response_code +iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm); + static void iopf_free_group(struct iopf_group 
*group) { struct iopf_fault *iopf, *next; @@ -45,23 +48,18 @@ static void iopf_handler(struct work_struct *work) { struct iopf_fault *iopf; struct iopf_group *group; - struct iommu_domain *domain; enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS; group = container_of(work, struct iopf_group, work); - domain = iommu_get_domain_for_dev_pasid(group->dev, - group->last_fault.fault.prm.pasid, 0); - if (!domain || !domain->iopf_handler) - status = IOMMU_PAGE_RESP_INVALID; - list_for_each_entry(iopf, &group->faults, list) { /* * For the moment, errors are sticky: don't handle subsequent * faults in the group if there is an error. */ - if (status == IOMMU_PAGE_RESP_SUCCESS) - status = domain->iopf_handler(&iopf->fault, - domain->fault_data); + if (status != IOMMU_PAGE_RESP_SUCCESS) + break; + + status = iommu_sva_handle_mm(&iopf->fault, group->domain->mm); } iopf_complete_group(group->dev, &group->last_fault, status); @@ -113,6 +111,7 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev) int ret; struct iopf_group *group; struct iopf_fault *iopf, *next; + struct iommu_domain *domain = NULL; struct iommu_fault_param *iopf_param; struct dev_iommu *param = dev->iommu; @@ -143,6 +142,23 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev) return 0; } + if (fault->prm.flags & IOMMU_FAULT_PAGE_REQUEST_PASID_VALID) { + domain = iommu_get_domain_for_dev_pasid(dev, fault->prm.pasid, 0); + if (IS_ERR(domain)) + domain = NULL; + } + + if (!domain) + domain = iommu_get_domain_for_dev(dev); + + if (!domain || !domain->iopf_handler) { + dev_warn_ratelimited(dev, + "iopf (pasid %d) without domain attached or handler installed\n", + fault->prm.pasid); + ret = -ENODEV; + goto cleanup_partial; + } + group = kzalloc(sizeof(*group), GFP_KERNEL); if (!group) { /* @@ -157,8 +173,8 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev) group->dev = dev; group->last_fault.fault = *fault; INIT_LIST_HEAD(&group->faults); + 
group->domain = domain; list_add(&group->last_fault.list, &group->faults); - INIT_WORK(&group->work, iopf_handler); /* See if we have partial faults for this group */ list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { @@ -167,9 +183,13 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev) list_move(&iopf->list, &group->faults); } - queue_work(iopf_param->queue->wq, &group->work); - return 0; + mutex_unlock(&iopf_param->lock); + ret = domain->iopf_handler(group); + mutex_lock(&iopf_param->lock); + if (ret) + iopf_free_group(group); + return ret; cleanup_partial: list_for_each_entry_safe(iopf, next, &iopf_param->partial, list) { if (iopf->fault.prm.grpid == fault->prm.grpid) { @@ -181,6 +201,17 @@ int iommu_queue_iopf(struct iommu_fault *fault, struct device *dev) } EXPORT_SYMBOL_GPL(iommu_queue_iopf); +int iommu_sva_handle_iopf(struct iopf_group *group) +{ + struct iommu_fault_param *fault_param = group->dev->iommu->fault_param; + + INIT_WORK(&group->work, iopf_handler); + if (!queue_work(fault_param->queue->wq, &group->work)) + return -EBUSY; + + return 0; +} + /** * iopf_queue_flush_dev - Ensure that all queued faults have been processed * @dev: the endpoint whose faults need to be flushed. diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c index b78671a8a914..ba0d5b7e106a 100644 --- a/drivers/iommu/iommu-sva.c +++ b/drivers/iommu/iommu-sva.c @@ -149,11 +149,10 @@ EXPORT_SYMBOL_GPL(iommu_sva_get_pasid); * I/O page fault handler for SVA */ enum iommu_page_response_code -iommu_sva_handle_iopf(struct iommu_fault *fault, void *data) +iommu_sva_handle_mm(struct iommu_fault *fault, struct mm_struct *mm) { vm_fault_t ret; struct vm_area_struct *vma; - struct mm_struct *mm = data; unsigned int access_flags = 0; unsigned int fault_flags = FAULT_FLAG_REMOTE; struct iommu_fault_page_request *prm = &fault->prm;