[RFC,v1,02/11] iommu/virtio: Maintain a list of endpoints served by viommu_dev

Message ID 20210423095147.27922-3-vivek.gautam@arm.com (mailing list archive)
State New, archived
Series iommu/virtio: vSVA support with Arm

Commit Message

Vivek Kumar Gautam April 23, 2021, 9:51 a.m. UTC
Keeping a record of the endpoints served by the virtio-iommu device
will help redirect page fault requests to the correct endpoint device
for handling.
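
The intent is that a later fault-handling path can take the endpoint ID
reported in a fault event (struct virtio_iommu_fault carries an endpoint
field) and resolve it back to the viommu_endpoint recorded here. A minimal
sketch of such a lookup over this list; the helper name is illustrative
only and not part of this patch:

static struct viommu_endpoint *
viommu_get_endpoint_by_id(struct viommu_dev *viommu, u32 eid)
{
	struct viommu_ep_entry *ep;

	/* walk the per-viommu endpoint list added by this patch */
	list_for_each_entry(ep, &viommu->endpoints, list)
		if (ep->eid == eid)
			return ep->vdev;

	return NULL;
}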

Signed-off-by: Vivek Gautam <vivek.gautam@arm.com>
---
 drivers/iommu/virtio-iommu.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

Comments

Jean-Philippe Brucker Sept. 21, 2021, 3:59 p.m. UTC | #1
On Fri, Apr 23, 2021 at 03:21:38PM +0530, Vivek Gautam wrote:
> Keeping a record of the endpoints served by the virtio-iommu device
> will help redirect page fault requests to the correct endpoint device
> for handling.
> 
> Signed-off-by: Vivek Gautam <vivek.gautam@arm.com>
> ---
>  drivers/iommu/virtio-iommu.c | 21 +++++++++++++++++++++
>  1 file changed, 21 insertions(+)
> 
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> index 50039070e2aa..c970f386f031 100644
> --- a/drivers/iommu/virtio-iommu.c
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -48,6 +48,7 @@ struct viommu_dev {
>  	spinlock_t			request_lock;
>  	struct list_head		requests;
>  	void				*evts;
> +	struct list_head		endpoints;

As we're going to search by ID, an xarray or rb_tree would be more
appropriate than a list
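
Something along these lines, as a rough sketch (not tested):

	/* in struct viommu_dev, instead of the list head */
	struct xarray			endpoints;	/* vdev pointers, indexed by endpoint ID */

	/* in viommu_probe() */
	xa_init(&viommu->endpoints);

	/* when registering an endpoint */
	ret = xa_insert(&viommu->endpoints, eid, vdev, GFP_KERNEL);

	/* lookup by ID, e.g. from the fault path */
	vdev = xa_load(&viommu->endpoints, eid);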

>  
>  	/* Device configuration */
>  	struct iommu_domain_geometry	geometry;
> @@ -115,6 +116,12 @@ struct viommu_endpoint {
>  	void				*pgtf;
>  };
>  
> +struct viommu_ep_entry {
> +	u32				eid;
> +	struct viommu_endpoint		*vdev;
> +	struct list_head		list;
> +};

No need for a separate struct, I think you can just add the list head and
id into viommu_endpoint.
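
i.e. something like (sketch only, existing fields elided):

	struct viommu_endpoint {
		/* ... existing fields (dev, viommu, resv_regions, pgtf, ...) ... */
		u32				eid;	/* endpoint ID */
		struct list_head		list;	/* on viommu->endpoints */
	};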

> +
>  struct viommu_request {
>  	struct list_head		list;
>  	void				*writeback;
> @@ -573,6 +580,7 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
>  	size_t probe_len;
>  	struct virtio_iommu_req_probe *probe;
>  	struct virtio_iommu_probe_property *prop;
> +	struct viommu_ep_entry *ep;
>  	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
>  	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
>  
> @@ -640,6 +648,18 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
>  		prop = (void *)probe->properties + cur;
>  		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
>  	}
> +	if (ret)
> +		goto out_free;
> +
> +	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
> +	if (!ep) {
> +		ret = -ENOMEM;
> +		goto out_free;
> +	}
> +	ep->eid = probe->endpoint;
> +	ep->vdev = vdev;
> +
> +	list_add(&ep->list, &viommu->endpoints);

This should be in viommu_probe_device() (viommu_probe_endpoint() is only
called if F_PROBE is negotiated). I think we need a lock for this
list/xarray
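
Roughly like this (illustrative only, assuming the ID and list head are
folded into viommu_endpoint as suggested above, and a lock with a made-up
name is added to viommu_dev; taking the first fwspec ID for simplicity):

	/* in viommu_probe_device() */
	vdev->eid = fwspec->ids[0];

	spin_lock(&viommu->endpoints_lock);		/* hypothetical lock */
	list_add(&vdev->list, &viommu->endpoints);
	spin_unlock(&viommu->endpoints_lock);

	/* with the matching removal in viommu_release_device() */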

Thanks,
Jean

>  
>  out_free:
>  	kfree(probe);
> @@ -1649,6 +1669,7 @@ static int viommu_probe(struct virtio_device *vdev)
>  	viommu->dev = dev;
>  	viommu->vdev = vdev;
>  	INIT_LIST_HEAD(&viommu->requests);
> +	INIT_LIST_HEAD(&viommu->endpoints);
>  
>  	ret = viommu_init_vqs(viommu);
>  	if (ret)
> -- 
> 2.17.1
>
Vivek Kumar Gautam Sept. 30, 2021, 9:17 a.m. UTC | #2
On 9/21/21 9:29 PM, Jean-Philippe Brucker wrote:
> On Fri, Apr 23, 2021 at 03:21:38PM +0530, Vivek Gautam wrote:
>> Keeping a record of the endpoints served by the virtio-iommu device
>> will help redirect page fault requests to the correct endpoint device
>> for handling.
>>
>> Signed-off-by: Vivek Gautam <vivek.gautam@arm.com>
>> ---
>>   drivers/iommu/virtio-iommu.c | 21 +++++++++++++++++++++
>>   1 file changed, 21 insertions(+)
>>
>> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
>> index 50039070e2aa..c970f386f031 100644
>> --- a/drivers/iommu/virtio-iommu.c
>> +++ b/drivers/iommu/virtio-iommu.c
>> @@ -48,6 +48,7 @@ struct viommu_dev {
>>   	spinlock_t			request_lock;
>>   	struct list_head		requests;
>>   	void				*evts;
>> +	struct list_head		endpoints;
> 
> As we're going to search by ID, an xarray or rb_tree would be more
> appropriate than a list

Sure, I will update this with an rb_tree.
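
Something like this for the ID lookup, assuming viommu->endpoints becomes
an rb_root and viommu_endpoint carries the rb_node and eid (rough sketch,
not tested):

static struct viommu_endpoint *
viommu_find_ep(struct viommu_dev *viommu, u32 eid)
{
	struct rb_node *node = viommu->endpoints.rb_node;

	while (node) {
		struct viommu_endpoint *vdev =
			rb_entry(node, struct viommu_endpoint, node);

		/* tree is ordered by endpoint ID */
		if (eid < vdev->eid)
			node = node->rb_left;
		else if (eid > vdev->eid)
			node = node->rb_right;
		else
			return vdev;
	}

	return NULL;
}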

> 
>>   
>>   	/* Device configuration */
>>   	struct iommu_domain_geometry	geometry;
>> @@ -115,6 +116,12 @@ struct viommu_endpoint {
>>   	void				*pgtf;
>>   };
>>   
>> +struct viommu_ep_entry {
>> +	u32				eid;
>> +	struct viommu_endpoint		*vdev;
>> +	struct list_head		list;
>> +};
> 
> No need for a separate struct, I think you can just add the list head and
> id into viommu_endpoint.

Yea right. I will update it.

> 
>> +
>>   struct viommu_request {
>>   	struct list_head		list;
>>   	void				*writeback;
>> @@ -573,6 +580,7 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
>>   	size_t probe_len;
>>   	struct virtio_iommu_req_probe *probe;
>>   	struct virtio_iommu_probe_property *prop;
>> +	struct viommu_ep_entry *ep;
>>   	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
>>   	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
>>   
>> @@ -640,6 +648,18 @@ static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
>>   		prop = (void *)probe->properties + cur;
>>   		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
>>   	}
>> +	if (ret)
>> +		goto out_free;
>> +
>> +	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
>> +	if (!ep) {
>> +		ret = -ENOMEM;
>> +		goto out_free;
>> +	}
>> +	ep->eid = probe->endpoint;
>> +	ep->vdev = vdev;
>> +
>> +	list_add(&ep->list, &viommu->endpoints);
> 
> This should be in viommu_probe_device() (viommu_probe_endpoint() is only
> called if F_PROBE is negotiated). I think we need a lock for this
> list/xarray

Sure, will fix this and add the needed locking around it.

Thanks & regards
Vivek

> 
> Thanks,
> Jean
> 
>>   
>>   out_free:
>>   	kfree(probe);
>> @@ -1649,6 +1669,7 @@ static int viommu_probe(struct virtio_device *vdev)
>>   	viommu->dev = dev;
>>   	viommu->vdev = vdev;
>>   	INIT_LIST_HEAD(&viommu->requests);
>> +	INIT_LIST_HEAD(&viommu->endpoints);
>>   
>>   	ret = viommu_init_vqs(viommu);
>>   	if (ret)
>> -- 
>> 2.17.1
>>

Patch

diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
index 50039070e2aa..c970f386f031 100644
--- a/drivers/iommu/virtio-iommu.c
+++ b/drivers/iommu/virtio-iommu.c
@@ -48,6 +48,7 @@  struct viommu_dev {
 	spinlock_t			request_lock;
 	struct list_head		requests;
 	void				*evts;
+	struct list_head		endpoints;
 
 	/* Device configuration */
 	struct iommu_domain_geometry	geometry;
@@ -115,6 +116,12 @@  struct viommu_endpoint {
 	void				*pgtf;
 };
 
+struct viommu_ep_entry {
+	u32				eid;
+	struct viommu_endpoint		*vdev;
+	struct list_head		list;
+};
+
 struct viommu_request {
 	struct list_head		list;
 	void				*writeback;
@@ -573,6 +580,7 @@  static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
 	size_t probe_len;
 	struct virtio_iommu_req_probe *probe;
 	struct virtio_iommu_probe_property *prop;
+	struct viommu_ep_entry *ep;
 	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
 	struct viommu_endpoint *vdev = dev_iommu_priv_get(dev);
 
@@ -640,6 +648,18 @@  static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
 		prop = (void *)probe->properties + cur;
 		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
 	}
+	if (ret)
+		goto out_free;
+
+	ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+	if (!ep) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
+	ep->eid = probe->endpoint;
+	ep->vdev = vdev;
+
+	list_add(&ep->list, &viommu->endpoints);
 
 out_free:
 	kfree(probe);
@@ -1649,6 +1669,7 @@  static int viommu_probe(struct virtio_device *vdev)
 	viommu->dev = dev;
 	viommu->vdev = vdev;
 	INIT_LIST_HEAD(&viommu->requests);
+	INIT_LIST_HEAD(&viommu->endpoints);
 
 	ret = viommu_init_vqs(viommu);
 	if (ret)