diff mbox series

[RFC,v1,03/11] iommu/virtio: Handle incoming page faults

Message ID 20210423095147.27922-4-vivek.gautam@arm.com (mailing list archive)
State New, archived
Headers show
Series iommu/virtio: vSVA support with Arm | expand

Commit Message

Vivek Kumar Gautam April 23, 2021, 9:51 a.m. UTC
Redirect the incoming page faults to the registered fault handler
that can take the fault information such as pasid, page request
group-id, address, and pasid flags.

Signed-off-by: Vivek Gautam <vivek.gautam@arm.com>
---
 drivers/iommu/virtio-iommu.c      | 80 ++++++++++++++++++++++++++++++-
 include/uapi/linux/virtio_iommu.h |  1 +
 2 files changed, 80 insertions(+), 1 deletion(-)

Comments

Jean-Philippe Brucker Sept. 21, 2021, 4:03 p.m. UTC | #1
On Fri, Apr 23, 2021 at 03:21:39PM +0530, Vivek Gautam wrote:
> Redirect the incoming page faults to the registered fault handler
> that can take the fault information such as, pasid, page request
> group-id, address and pasid flags.
> 
> Signed-off-by: Vivek Gautam <vivek.gautam@arm.com>
> ---
>  drivers/iommu/virtio-iommu.c      | 80 ++++++++++++++++++++++++++++++-
>  include/uapi/linux/virtio_iommu.h |  1 +
>  2 files changed, 80 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> index c970f386f031..fd237cad1ce5 100644
> --- a/drivers/iommu/virtio-iommu.c
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -37,6 +37,13 @@
>  /* Some architectures need an Address Space ID for each page table */
>  DEFINE_XARRAY_ALLOC1(viommu_asid_xa);
>  
> +struct viommu_dev_pri_work {
> +	struct work_struct		work;
> +	struct viommu_dev		*dev;
> +	struct virtio_iommu_fault	*vfault;
> +	u32				endpoint;
> +};
> +
>  struct viommu_dev {
>  	struct iommu_device		iommu;
>  	struct device			*dev;
> @@ -49,6 +56,8 @@ struct viommu_dev {
>  	struct list_head		requests;
>  	void				*evts;
>  	struct list_head		endpoints;
> +	struct workqueue_struct		*pri_wq;
> +	struct viommu_dev_pri_work	*pri_work;

IOPF already has a workqueue, so the driver doesn't need one.
iommu_report_device_fault() should be fast enough to be called from the
event handler.

>  
>  	/* Device configuration */
>  	struct iommu_domain_geometry	geometry;
> @@ -666,6 +675,58 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
>  	return ret;
>  }
>  
> +static void viommu_handle_ppr(struct work_struct *work)
> +{
> +	struct viommu_dev_pri_work *pwork =
> +				container_of(work, struct viommu_dev_pri_work, work);
> +	struct viommu_dev *viommu = pwork->dev;
> +	struct virtio_iommu_fault *vfault = pwork->vfault;
> +	struct viommu_endpoint *vdev;
> +	struct viommu_ep_entry *ep;
> +	struct iommu_fault_event fault_evt = {
> +		.fault.type = IOMMU_FAULT_PAGE_REQ,
> +	};
> +	struct iommu_fault_page_request *prq = &fault_evt.fault.prm;
> +
> +	u32 flags	= le32_to_cpu(vfault->flags);
> +	u32 prq_flags	= le32_to_cpu(vfault->pr_evt_flags);
> +	u32 endpoint	= pwork->endpoint;
> +
> +	memset(prq, 0, sizeof(struct iommu_fault_page_request));

The fault_evt struct is already initialized

> +	prq->addr = le64_to_cpu(vfault->address);
> +
> +	if (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_LAST_PAGE)
> +		prq->flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
> +	if (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_PASID_VALID) {
> +		prq->flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
> +		prq->pasid = le32_to_cpu(vfault->pasid);
> +		prq->grpid = le32_to_cpu(vfault->grpid);
> +	}
> +
> +	if (flags & VIRTIO_IOMMU_FAULT_F_READ)
> +		prq->perm |= IOMMU_FAULT_PERM_READ;
> +	if (flags & VIRTIO_IOMMU_FAULT_F_WRITE)
> +		prq->perm |= IOMMU_FAULT_PERM_WRITE;
> +	if (flags & VIRTIO_IOMMU_FAULT_F_EXEC)
> +		prq->perm |= IOMMU_FAULT_PERM_EXEC;
> +	if (flags & VIRTIO_IOMMU_FAULT_F_PRIV)
> +		prq->perm |= IOMMU_FAULT_PERM_PRIV;
> +
> +	list_for_each_entry(ep, &viommu->endpoints, list) {
> +		if (ep->eid == endpoint) {
> +			vdev = ep->vdev;
> +			break;
> +		}
> +	}
> +
> +	if ((prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_PASID_VALID) &&
> +	    (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_NEEDS_PASID))
> +		prq->flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
> +
> +	if (iommu_report_device_fault(vdev->dev, &fault_evt))
> +		dev_err(vdev->dev, "Couldn't handle page request\n");

An error likely means that nobody registered a fault handler, but we could
display a few more details about the fault that would help debug the
endpoint

> +}
> +
>  static int viommu_fault_handler(struct viommu_dev *viommu,
>  				struct virtio_iommu_fault *fault)
>  {
> @@ -679,7 +740,13 @@ static int viommu_fault_handler(struct viommu_dev *viommu,
>  	u32 pasid	= le32_to_cpu(fault->pasid);
>  
>  	if (type == VIRTIO_IOMMU_FAULT_F_PAGE_REQ) {
> -		dev_info(viommu->dev, "Page request fault - unhandled\n");
> +		dev_info_ratelimited(viommu->dev,
> +				     "Page request fault from EP %u\n",
> +				     endpoint);

That's rather for debugging the virtio-iommu driver, so should be
dev_dbg() (or removed entirely)

> +
> +		viommu->pri_work->vfault = fault;
> +		viommu->pri_work->endpoint = endpoint;
> +		queue_work(viommu->pri_wq, &viommu->pri_work->work);
>  		return 0;
>  	}
>  
> @@ -1683,6 +1750,17 @@ static int viommu_probe(struct virtio_device *vdev)
>  		goto err_free_vqs;
>  	}
>  
> +	viommu->pri_work = kzalloc(sizeof(*viommu->pri_work), GFP_KERNEL);
> +	if (!viommu->pri_work)
> +		return -ENOMEM;
> +
> +	viommu->pri_work->dev = viommu;
> +
> +	INIT_WORK(&viommu->pri_work->work, viommu_handle_ppr);
> +	viommu->pri_wq = create_singlethread_workqueue("viommu-pri-wq");
> +	if (!viommu->pri_wq)
> +		return -ENOMEM;
> +
>  	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
>  	viommu->last_domain = ~0U;
>  
> diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
> index accc3318ce46..53aa88e6b077 100644
> --- a/include/uapi/linux/virtio_iommu.h
> +++ b/include/uapi/linux/virtio_iommu.h
> @@ -302,6 +302,7 @@ struct virtio_iommu_req_invalidate {
>  #define VIRTIO_IOMMU_FAULT_F_READ		(1 << 0)
>  #define VIRTIO_IOMMU_FAULT_F_WRITE		(1 << 1)
>  #define VIRTIO_IOMMU_FAULT_F_EXEC		(1 << 2)
> +#define VIRTIO_IOMMU_FAULT_F_PRIV		(1 << 3)

Should go in the previous patch. (I'd also prefer 'privileged' because in
this context 'priv' is easily read as 'private')

Thanks,
Jean

>  #define VIRTIO_IOMMU_FAULT_F_ADDRESS		(1 << 8)
>  
>  #define VIRTIO_IOMMU_FAULT_F_DMA_UNRECOV	1
> -- 
> 2.17.1
>
Vivek Kumar Gautam Oct. 11, 2021, 8:11 a.m. UTC | #2
Hi Jean,


On Tue, Sep 21, 2021 at 9:33 PM Jean-Philippe Brucker
<jean-philippe@linaro.org> wrote:
>
> On Fri, Apr 23, 2021 at 03:21:39PM +0530, Vivek Gautam wrote:
> > Redirect the incoming page faults to the registered fault handler
> > that can take the fault information such as, pasid, page request
> > group-id, address and pasid flags.
> >
> > Signed-off-by: Vivek Gautam <vivek.gautam@arm.com>
> > ---
> >  drivers/iommu/virtio-iommu.c      | 80 ++++++++++++++++++++++++++++++-
> >  include/uapi/linux/virtio_iommu.h |  1 +
> >  2 files changed, 80 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> > index c970f386f031..fd237cad1ce5 100644
> > --- a/drivers/iommu/virtio-iommu.c
> > +++ b/drivers/iommu/virtio-iommu.c
> > @@ -37,6 +37,13 @@
> >  /* Some architectures need an Address Space ID for each page table */
> >  DEFINE_XARRAY_ALLOC1(viommu_asid_xa);
> >
> > +struct viommu_dev_pri_work {
> > +     struct work_struct              work;
> > +     struct viommu_dev               *dev;
> > +     struct virtio_iommu_fault       *vfault;
> > +     u32                             endpoint;
> > +};
> > +
> >  struct viommu_dev {
> >       struct iommu_device             iommu;
> >       struct device                   *dev;
> > @@ -49,6 +56,8 @@ struct viommu_dev {
> >       struct list_head                requests;
> >       void                            *evts;
> >       struct list_head                endpoints;
> > +     struct workqueue_struct         *pri_wq;
> > +     struct viommu_dev_pri_work      *pri_work;
>
> IOPF already has a workqueue, so the driver doesn't need one.
> iommu_report_device_fault() should be fast enough to be called from the
> event handler.

Sure, will call iommu_report_device_fault() directly from
viommu_fault_handler().

>
> >
> >       /* Device configuration */
> >       struct iommu_domain_geometry    geometry;
> > @@ -666,6 +675,58 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
> >       return ret;
> >  }
> >
> > +static void viommu_handle_ppr(struct work_struct *work)
> > +{
> > +     struct viommu_dev_pri_work *pwork =
> > +                             container_of(work, struct viommu_dev_pri_work, work);
> > +     struct viommu_dev *viommu = pwork->dev;
> > +     struct virtio_iommu_fault *vfault = pwork->vfault;
> > +     struct viommu_endpoint *vdev;
> > +     struct viommu_ep_entry *ep;
> > +     struct iommu_fault_event fault_evt = {
> > +             .fault.type = IOMMU_FAULT_PAGE_REQ,
> > +     };
> > +     struct iommu_fault_page_request *prq = &fault_evt.fault.prm;
> > +
> > +     u32 flags       = le32_to_cpu(vfault->flags);
> > +     u32 prq_flags   = le32_to_cpu(vfault->pr_evt_flags);
> > +     u32 endpoint    = pwork->endpoint;
> > +
> > +     memset(prq, 0, sizeof(struct iommu_fault_page_request));
>
> The fault_evt struct is already initialized

Right, I will remove this line.

>
> > +     prq->addr = le64_to_cpu(vfault->address);
> > +
> > +     if (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_LAST_PAGE)
> > +             prq->flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
> > +     if (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_PASID_VALID) {
> > +             prq->flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
> > +             prq->pasid = le32_to_cpu(vfault->pasid);
> > +             prq->grpid = le32_to_cpu(vfault->grpid);
> > +     }
> > +
> > +     if (flags & VIRTIO_IOMMU_FAULT_F_READ)
> > +             prq->perm |= IOMMU_FAULT_PERM_READ;
> > +     if (flags & VIRTIO_IOMMU_FAULT_F_WRITE)
> > +             prq->perm |= IOMMU_FAULT_PERM_WRITE;
> > +     if (flags & VIRTIO_IOMMU_FAULT_F_EXEC)
> > +             prq->perm |= IOMMU_FAULT_PERM_EXEC;
> > +     if (flags & VIRTIO_IOMMU_FAULT_F_PRIV)
> > +             prq->perm |= IOMMU_FAULT_PERM_PRIV;
> > +
> > +     list_for_each_entry(ep, &viommu->endpoints, list) {
> > +             if (ep->eid == endpoint) {
> > +                     vdev = ep->vdev;

I have a question here though -
Is endpoint-ID unique across all the endpoints available per 'viommu_dev' or
per 'viommu_domain'?
If it is per 'viommu_domain' then the above list is also incorrect.
As you pointed to in the patch [1] -
[PATCH RFC v1 02/11] iommu/virtio: Maintain a list of endpoints served
by viommu_dev
I am planning to add endpoint ID into a static global xarray in
viommu_probe_device() as below:

        vdev_for_each_id(i, eid, vdev) {
                ret = xa_insert(&viommu_ep_ids, eid, vdev, GFP_KERNEL);
                if (ret)
                        goto err_free_dev;
        }

and replace the above list traversal as below:

                xa_lock_irqsave(&viommu_ep_ids, flags);
                xa_for_each(&viommu_ep_ids, eid, vdev) {
                        if (eid == endpoint) {
                                ret =
iommu_report_device_fault(vdev->dev, &fault_evt);
                                if (ret)
                                        dev_err(vdev->dev, "Couldn't
handle page request\n");
                        }
                }
                xa_unlock_irqrestore(&viommu_ep_ids, flags);

But using a global xarray would also be incorrect if the endpoint IDs are global
across 'viommu_domain'.

I need to find the correct 'viommu_endpoint' to call iommu_report_device_fault()
with the correct device.

> > +                     break;
> > +             }
> > +     }
> > +
> > +     if ((prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_PASID_VALID) &&
> > +         (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_NEEDS_PASID))
> > +             prq->flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
> > +
> > +     if (iommu_report_device_fault(vdev->dev, &fault_evt))
> > +             dev_err(vdev->dev, "Couldn't handle page request\n");
>
> An error likely means that nobody registered a fault handler, but we could
> display a few more details about the fault that would help debug the
> endpoint

Sure, will add more debug info to this log.

>
> > +}
> > +
> >  static int viommu_fault_handler(struct viommu_dev *viommu,
> >                               struct virtio_iommu_fault *fault)
> >  {
> > @@ -679,7 +740,13 @@ static int viommu_fault_handler(struct viommu_dev *viommu,
> >       u32 pasid       = le32_to_cpu(fault->pasid);
> >
> >       if (type == VIRTIO_IOMMU_FAULT_F_PAGE_REQ) {
> > -             dev_info(viommu->dev, "Page request fault - unhandled\n");
> > +             dev_info_ratelimited(viommu->dev,
> > +                                  "Page request fault from EP %u\n",
> > +                                  endpoint);
>
> That's rather for debugging the virtio-iommu driver, so should be
> dev_dbg() (or removed entirely)

I will remove this log.

>
> > +
> > +             viommu->pri_work->vfault = fault;
> > +             viommu->pri_work->endpoint = endpoint;
> > +             queue_work(viommu->pri_wq, &viommu->pri_work->work);
> >               return 0;
> >       }
> >
> > @@ -1683,6 +1750,17 @@ static int viommu_probe(struct virtio_device *vdev)
> >               goto err_free_vqs;
> >       }
> >
> > +     viommu->pri_work = kzalloc(sizeof(*viommu->pri_work), GFP_KERNEL);
> > +     if (!viommu->pri_work)
> > +             return -ENOMEM;
> > +
> > +     viommu->pri_work->dev = viommu;
> > +
> > +     INIT_WORK(&viommu->pri_work->work, viommu_handle_ppr);
> > +     viommu->pri_wq = create_singlethread_workqueue("viommu-pri-wq");
> > +     if (!viommu->pri_wq)
> > +             return -ENOMEM;
> > +
> >       viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
> >       viommu->last_domain = ~0U;
> >
> > diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
> > index accc3318ce46..53aa88e6b077 100644
> > --- a/include/uapi/linux/virtio_iommu.h
> > +++ b/include/uapi/linux/virtio_iommu.h
> > @@ -302,6 +302,7 @@ struct virtio_iommu_req_invalidate {
> >  #define VIRTIO_IOMMU_FAULT_F_READ            (1 << 0)
> >  #define VIRTIO_IOMMU_FAULT_F_WRITE           (1 << 1)
> >  #define VIRTIO_IOMMU_FAULT_F_EXEC            (1 << 2)
> > +#define VIRTIO_IOMMU_FAULT_F_PRIV            (1 << 3)
>
> Should go in the previous patch. (I'd also prefer 'privileged' because in
> this context 'priv' is easily read as 'private')

Sure, will move this to the previous patch.

Thanks & regards
Vivek

[1] https://lore.kernel.org/all/YUoBW13+CvIljUgc@myrica/#t

[snip]
Jean-Philippe Brucker Oct. 11, 2021, 9:16 a.m. UTC | #3
Hi Vivek,

On Mon, Oct 11, 2021 at 01:41:15PM +0530, Vivek Gautam wrote:
> > > +     list_for_each_entry(ep, &viommu->endpoints, list) {
> > > +             if (ep->eid == endpoint) {
> > > +                     vdev = ep->vdev;
> 
> I have a question here though -
> Is endpoint-ID unique across all the endpoints available per 'viommu_dev' or
> per 'viommu_domain'?
> If it is per 'viommu_domain' then the above list is also incorrect.
> As you pointed to in the patch [1] -
> [PATCH RFC v1 02/11] iommu/virtio: Maintain a list of endpoints served
> by viommu_dev
> I am planning to add endpoint ID into a static global xarray in
> viommu_probe_device() as below:
> 
>         vdev_for_each_id(i, eid, vdev) {
>                 ret = xa_insert(&viommu_ep_ids, eid, vdev, GFP_KERNEL);
>                 if (ret)
>                         goto err_free_dev;
>         }
> 
> and replace the above list traversal as below:
> 
>                 xa_lock_irqsave(&viommu_ep_ids, flags);
>                 xa_for_each(&viommu_ep_ids, eid, vdev) {
>                         if (eid == endpoint) {
>                                 ret =
> iommu_report_device_fault(vdev->dev, &fault_evt);
>                                 if (ret)
>                                         dev_err(vdev->dev, "Couldn't
> handle page request\n");
>                         }
>                 }
>                 xa_unlock_irqrestore(&viommu_ep_ids, flags);
> 
> But using a global xarray would also be incorrect if the endpointsID are global
> across 'viommu_domain'.
> 
> I need to find the correct 'viommu_endpoint' to call iommu_report_device_fault()
> with the correct device.

The endpoint IDs are only unique across viommu_dev, so a global xarray
wouldn't work but one in viommu_dev would. In vdomain it doesn't work
either because we can't get to the domain from the fault handler without
first finding the endpoint

Thanks,
Jean
Vivek Kumar Gautam Oct. 11, 2021, 9:20 a.m. UTC | #4
Hi Jean,


On 10/11/21 2:46 PM, Jean-Philippe Brucker wrote:
> Hi Vivek,
> 
> On Mon, Oct 11, 2021 at 01:41:15PM +0530, Vivek Gautam wrote:
>>>> +     list_for_each_entry(ep, &viommu->endpoints, list) {
>>>> +             if (ep->eid == endpoint) {
>>>> +                     vdev = ep->vdev;
>>
>> I have a question here though -
>> Is endpoint-ID unique across all the endpoints available per 'viommu_dev' or
>> per 'viommu_domain'?
>> If it is per 'viommu_domain' then the above list is also incorrect.
>> As you pointed to in the patch [1] -
>> [PATCH RFC v1 02/11] iommu/virtio: Maintain a list of endpoints served
>> by viommu_dev
>> I am planning to add endpoint ID into a static global xarray in
>> viommu_probe_device() as below:
>>
>>          vdev_for_each_id(i, eid, vdev) {
>>                  ret = xa_insert(&viommu_ep_ids, eid, vdev, GFP_KERNEL);
>>                  if (ret)
>>                          goto err_free_dev;
>>          }
>>
>> and replace the above list traversal as below:
>>
>>                  xa_lock_irqsave(&viommu_ep_ids, flags);
>>                  xa_for_each(&viommu_ep_ids, eid, vdev) {
>>                          if (eid == endpoint) {
>>                                  ret =
>> iommu_report_device_fault(vdev->dev, &fault_evt);
>>                                  if (ret)
>>                                          dev_err(vdev->dev, "Couldn't
>> handle page request\n");
>>                          }
>>                  }
>>                  xa_unlock_irqrestore(&viommu_ep_ids, flags);
>>
>> But using a global xarray would also be incorrect if the endpointsID are global
>> across 'viommu_domain'.
>>
>> I need to find the correct 'viommu_endpoint' to call iommu_report_device_fault()
>> with the correct device.
> 
> The endpoint IDs are only unique across viommu_dev, so a global xarray
> wouldn't work but one in viommu_dev would. In vdomain it doesn't work
> either because we can't get to the domain from the fault handler without
> first finding the endpoint

Thanks. That's easy then. Will have an xarray in viommu_dev and iterate 
over it from the fault handler.

Best regards
Vivek
diff mbox series

Patch

diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index c970f386f031..fd237cad1ce5 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -37,6 +37,13 @@ 
 /* Some architectures need an Address Space ID for each page table */
 DEFINE_XARRAY_ALLOC1(viommu_asid_xa);
 
+struct viommu_dev_pri_work {
+	struct work_struct		work;
+	struct viommu_dev		*dev;
+	struct virtio_iommu_fault	*vfault;
+	u32				endpoint;
+};
+
 struct viommu_dev {
 	struct iommu_device		iommu;
 	struct device			*dev;
@@ -49,6 +56,8 @@  struct viommu_dev {
 	struct list_head		requests;
 	void				*evts;
 	struct list_head		endpoints;
+	struct workqueue_struct		*pri_wq;
+	struct viommu_dev_pri_work	*pri_work;
 
 	/* Device configuration */
 	struct iommu_domain_geometry	geometry;
@@ -666,6 +675,58 @@  static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
 	return ret;
 }
 
+static void viommu_handle_ppr(struct work_struct *work)
+{
+	struct viommu_dev_pri_work *pwork =
+				container_of(work, struct viommu_dev_pri_work, work);
+	struct viommu_dev *viommu = pwork->dev;
+	struct virtio_iommu_fault *vfault = pwork->vfault;
+	struct viommu_endpoint *vdev;
+	struct viommu_ep_entry *ep;
+	struct iommu_fault_event fault_evt = {
+		.fault.type = IOMMU_FAULT_PAGE_REQ,
+	};
+	struct iommu_fault_page_request *prq = &fault_evt.fault.prm;
+
+	u32 flags	= le32_to_cpu(vfault->flags);
+	u32 prq_flags	= le32_to_cpu(vfault->pr_evt_flags);
+	u32 endpoint	= pwork->endpoint;
+
+	memset(prq, 0, sizeof(struct iommu_fault_page_request));
+	prq->addr = le64_to_cpu(vfault->address);
+
+	if (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_LAST_PAGE)
+		prq->flags |= IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE;
+	if (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_PASID_VALID) {
+		prq->flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+		prq->pasid = le32_to_cpu(vfault->pasid);
+		prq->grpid = le32_to_cpu(vfault->grpid);
+	}
+
+	if (flags & VIRTIO_IOMMU_FAULT_F_READ)
+		prq->perm |= IOMMU_FAULT_PERM_READ;
+	if (flags & VIRTIO_IOMMU_FAULT_F_WRITE)
+		prq->perm |= IOMMU_FAULT_PERM_WRITE;
+	if (flags & VIRTIO_IOMMU_FAULT_F_EXEC)
+		prq->perm |= IOMMU_FAULT_PERM_EXEC;
+	if (flags & VIRTIO_IOMMU_FAULT_F_PRIV)
+		prq->perm |= IOMMU_FAULT_PERM_PRIV;
+
+	list_for_each_entry(ep, &viommu->endpoints, list) {
+		if (ep->eid == endpoint) {
+			vdev = ep->vdev;
+			break;
+		}
+	}
+
+	if ((prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_PASID_VALID) &&
+	    (prq_flags & VIRTIO_IOMMU_FAULT_PRQ_F_NEEDS_PASID))
+		prq->flags |= IOMMU_FAULT_PAGE_RESPONSE_NEEDS_PASID;
+
+	if (iommu_report_device_fault(vdev->dev, &fault_evt))
+		dev_err(vdev->dev, "Couldn't handle page request\n");
+}
+
 static int viommu_fault_handler(struct viommu_dev *viommu,
 				struct virtio_iommu_fault *fault)
 {
@@ -679,7 +740,13 @@  static int viommu_fault_handler(struct viommu_dev *viommu,
 	u32 pasid	= le32_to_cpu(fault->pasid);
 
 	if (type == VIRTIO_IOMMU_FAULT_F_PAGE_REQ) {
-		dev_info(viommu->dev, "Page request fault - unhandled\n");
+		dev_info_ratelimited(viommu->dev,
+				     "Page request fault from EP %u\n",
+				     endpoint);
+
+		viommu->pri_work->vfault = fault;
+		viommu->pri_work->endpoint = endpoint;
+		queue_work(viommu->pri_wq, &viommu->pri_work->work);
 		return 0;
 	}
 
@@ -1683,6 +1750,17 @@  static int viommu_probe(struct virtio_device *vdev)
 		goto err_free_vqs;
 	}
 
+	viommu->pri_work = kzalloc(sizeof(*viommu->pri_work), GFP_KERNEL);
+	if (!viommu->pri_work)
+		return -ENOMEM;
+
+	viommu->pri_work->dev = viommu;
+
+	INIT_WORK(&viommu->pri_work->work, viommu_handle_ppr);
+	viommu->pri_wq = create_singlethread_workqueue("viommu-pri-wq");
+	if (!viommu->pri_wq)
+		return -ENOMEM;
+
 	viommu->map_flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
 	viommu->last_domain = ~0U;
 
diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
index accc3318ce46..53aa88e6b077 100644
--- a/include/uapi/linux/virtio_iommu.h
+++ b/include/uapi/linux/virtio_iommu.h
@@ -302,6 +302,7 @@  struct virtio_iommu_req_invalidate {
 #define VIRTIO_IOMMU_FAULT_F_READ		(1 << 0)
 #define VIRTIO_IOMMU_FAULT_F_WRITE		(1 << 1)
 #define VIRTIO_IOMMU_FAULT_F_EXEC		(1 << 2)
+#define VIRTIO_IOMMU_FAULT_F_PRIV		(1 << 3)
 #define VIRTIO_IOMMU_FAULT_F_ADDRESS		(1 << 8)
 
 #define VIRTIO_IOMMU_FAULT_F_DMA_UNRECOV	1