Message ID | 20231115030226.16700-9-baolu.lu@linux.intel.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | iommu: Prepare to deliver page faults to user space | expand |
On 2023/11/15 11:02, Lu Baolu wrote: > Move iopf_group data structure to iommu.h to make it a minimal set of > faults that a domain's page fault handler should handle. > > Add a new function, iopf_free_group(), to free a fault group after all > faults in the group are handled. This function will be made global so > that it can be called from other files, such as iommu-sva.c. > > Move iopf_queue data structure to iommu.h to allow the workqueue to be > scheduled out of this file. > > This will simplify the sequential patches. > > Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com> > Reviewed-by: Jason Gunthorpe <jgg@nvidia.com> > Reviewed-by: Kevin Tian <kevin.tian@intel.com> > Tested-by: Yan Zhao <yan.y.zhao@intel.com> > --- > include/linux/iommu.h | 20 +++++++++++++++++++- > drivers/iommu/io-pgfault.c | 37 +++++++++++++------------------------ > 2 files changed, 32 insertions(+), 25 deletions(-) Reviewed-by: Yi Liu <yi.l.liu@intel.com> > diff --git a/include/linux/iommu.h b/include/linux/iommu.h > index 42b62bc8737a..0d3c5a56b078 100644 > --- a/include/linux/iommu.h > +++ b/include/linux/iommu.h > @@ -41,7 +41,6 @@ struct iommu_dirty_ops; > struct notifier_block; > struct iommu_sva; > struct iommu_dma_cookie; > -struct iopf_queue; > > #define IOMMU_FAULT_PERM_READ (1 << 0) /* read */ > #define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */ > @@ -126,6 +125,25 @@ struct iopf_fault { > struct list_head list; > }; > > +struct iopf_group { > + struct iopf_fault last_fault; > + struct list_head faults; > + struct work_struct work; > + struct device *dev; > +}; > + > +/** > + * struct iopf_queue - IO Page Fault queue > + * @wq: the fault workqueue > + * @devices: devices attached to this queue > + * @lock: protects the device list > + */ > +struct iopf_queue { > + struct workqueue_struct *wq; > + struct list_head devices; > + struct mutex lock; > +}; > + > /* iommu fault flags */ > #define IOMMU_FAULT_READ 0x0 > #define IOMMU_FAULT_WRITE 0x1 > diff --git 
a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c > index c45977bb7da3..09e05f483b4f 100644 > --- a/drivers/iommu/io-pgfault.c > +++ b/drivers/iommu/io-pgfault.c > @@ -13,24 +13,17 @@ > > #include "iommu-sva.h" > > -/** > - * struct iopf_queue - IO Page Fault queue > - * @wq: the fault workqueue > - * @devices: devices attached to this queue > - * @lock: protects the device list > - */ > -struct iopf_queue { > - struct workqueue_struct *wq; > - struct list_head devices; > - struct mutex lock; > -}; > +static void iopf_free_group(struct iopf_group *group) > +{ > + struct iopf_fault *iopf, *next; > > -struct iopf_group { > - struct iopf_fault last_fault; > - struct list_head faults; > - struct work_struct work; > - struct device *dev; > -}; > + list_for_each_entry_safe(iopf, next, &group->faults, list) { > + if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) > + kfree(iopf); > + } > + > + kfree(group); > +} > > static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf, > enum iommu_page_response_code status) > @@ -50,9 +43,9 @@ static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf, > > static void iopf_handler(struct work_struct *work) > { > + struct iopf_fault *iopf; > struct iopf_group *group; > struct iommu_domain *domain; > - struct iopf_fault *iopf, *next; > enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS; > > group = container_of(work, struct iopf_group, work); > @@ -61,7 +54,7 @@ static void iopf_handler(struct work_struct *work) > if (!domain || !domain->iopf_handler) > status = IOMMU_PAGE_RESP_INVALID; > > - list_for_each_entry_safe(iopf, next, &group->faults, list) { > + list_for_each_entry(iopf, &group->faults, list) { > /* > * For the moment, errors are sticky: don't handle subsequent > * faults in the group if there is an error. 
> @@ -69,14 +62,10 @@ static void iopf_handler(struct work_struct *work) > if (status == IOMMU_PAGE_RESP_SUCCESS) > status = domain->iopf_handler(&iopf->fault, > domain->fault_data); > - > - if (!(iopf->fault.prm.flags & > - IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) > - kfree(iopf); > } > > iopf_complete_group(group->dev, &group->last_fault, status); > - kfree(group); > + iopf_free_group(group); > } > > /**
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 42b62bc8737a..0d3c5a56b078 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -41,7 +41,6 @@ struct iommu_dirty_ops; struct notifier_block; struct iommu_sva; struct iommu_dma_cookie; -struct iopf_queue; #define IOMMU_FAULT_PERM_READ (1 << 0) /* read */ #define IOMMU_FAULT_PERM_WRITE (1 << 1) /* write */ @@ -126,6 +125,25 @@ struct iopf_fault { struct list_head list; }; +struct iopf_group { + struct iopf_fault last_fault; + struct list_head faults; + struct work_struct work; + struct device *dev; +}; + +/** + * struct iopf_queue - IO Page Fault queue + * @wq: the fault workqueue + * @devices: devices attached to this queue + * @lock: protects the device list + */ +struct iopf_queue { + struct workqueue_struct *wq; + struct list_head devices; + struct mutex lock; +}; + /* iommu fault flags */ #define IOMMU_FAULT_READ 0x0 #define IOMMU_FAULT_WRITE 0x1 diff --git a/drivers/iommu/io-pgfault.c b/drivers/iommu/io-pgfault.c index c45977bb7da3..09e05f483b4f 100644 --- a/drivers/iommu/io-pgfault.c +++ b/drivers/iommu/io-pgfault.c @@ -13,24 +13,17 @@ #include "iommu-sva.h" -/** - * struct iopf_queue - IO Page Fault queue - * @wq: the fault workqueue - * @devices: devices attached to this queue - * @lock: protects the device list - */ -struct iopf_queue { - struct workqueue_struct *wq; - struct list_head devices; - struct mutex lock; -}; +static void iopf_free_group(struct iopf_group *group) +{ + struct iopf_fault *iopf, *next; -struct iopf_group { - struct iopf_fault last_fault; - struct list_head faults; - struct work_struct work; - struct device *dev; -}; + list_for_each_entry_safe(iopf, next, &group->faults, list) { + if (!(iopf->fault.prm.flags & IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) + kfree(iopf); + } + + kfree(group); +} static int iopf_complete_group(struct device *dev, struct iopf_fault *iopf, enum iommu_page_response_code status) @@ -50,9 +43,9 @@ static int iopf_complete_group(struct 
device *dev, struct iopf_fault *iopf, static void iopf_handler(struct work_struct *work) { + struct iopf_fault *iopf; struct iopf_group *group; struct iommu_domain *domain; - struct iopf_fault *iopf, *next; enum iommu_page_response_code status = IOMMU_PAGE_RESP_SUCCESS; group = container_of(work, struct iopf_group, work); @@ -61,7 +54,7 @@ static void iopf_handler(struct work_struct *work) if (!domain || !domain->iopf_handler) status = IOMMU_PAGE_RESP_INVALID; - list_for_each_entry_safe(iopf, next, &group->faults, list) { + list_for_each_entry(iopf, &group->faults, list) { /* * For the moment, errors are sticky: don't handle subsequent * faults in the group if there is an error. @@ -69,14 +62,10 @@ static void iopf_handler(struct work_struct *work) if (status == IOMMU_PAGE_RESP_SUCCESS) status = domain->iopf_handler(&iopf->fault, domain->fault_data); - - if (!(iopf->fault.prm.flags & - IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE)) - kfree(iopf); } iopf_complete_group(group->dev, &group->last_fault, status); - kfree(group); + iopf_free_group(group); } /**