
[v5,05/20] vfio/iommufd: Add support for iova_ranges and pgsizes

Message ID 20231109114529.1904193-6-zhenzhong.duan@intel.com (mailing list archive)
State New, archived
Series vfio: Adopt iommufd

Commit Message

Duan, Zhenzhong Nov. 9, 2023, 11:45 a.m. UTC
Some vIOMMUs, such as virtio-iommu, use host-side IOVA ranges to set
up reserved ranges for a passthrough device, so that the guest will
not use an IOVA range beyond what the host supports.

Use the IOMMUFD uAPI (IOMMU_IOAS_IOVA_RANGES) to retrieve the
host-side IOVA ranges and pass them to the vIOMMU, just as the legacy
backend does.

Also use the out_iova_alignment returned by the uAPI as pgsizes,
keeping qemu_real_host_page_size() only as a fallback.

Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
---
v5: Add missing pgsizes initialization in vfio_get_info_iova_range

 hw/vfio/iommufd.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 48 insertions(+)

Comments

Cédric Le Goater Nov. 10, 2023, 9:36 a.m. UTC | #1
On 11/9/23 12:45, Zhenzhong Duan wrote:
> Some vIOMMUs, such as virtio-iommu, use host-side IOVA ranges to set
> up reserved ranges for a passthrough device, so that the guest will
> not use an IOVA range beyond what the host supports.
> 
> Use the IOMMUFD uAPI (IOMMU_IOAS_IOVA_RANGES) to retrieve the
> host-side IOVA ranges and pass them to the vIOMMU, just as the legacy
> backend does.
> 
> Also use the out_iova_alignment returned by the uAPI as pgsizes,
> keeping qemu_real_host_page_size() only as a fallback.
> 
> Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
> ---
> v5: Add missing pgsizes initialization in vfio_get_info_iova_range
> 
>   hw/vfio/iommufd.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++
>   1 file changed, 48 insertions(+)
> 
> diff --git a/hw/vfio/iommufd.c b/hw/vfio/iommufd.c
> index ea4e23f4ec..958c3e794f 100644
> --- a/hw/vfio/iommufd.c
> +++ b/hw/vfio/iommufd.c
> @@ -267,6 +267,53 @@ static int iommufd_ram_block_discard_disable(bool state)
>       return ram_block_uncoordinated_discard_disable(state);
>   }
>   
> +static int vfio_get_info_iova_range(VFIOIOMMUFDContainer *container,
> +                                    uint32_t ioas_id)
> +{
> +    VFIOContainerBase *bcontainer = &container->bcontainer;
> +    struct iommu_ioas_iova_ranges *info;
> +    struct iommu_iova_range *iova_ranges;
> +    int ret, sz, fd = container->be->fd;
> +
> +    info = g_malloc0(sizeof(*info));
> +    info->size = sizeof(*info);
> +    info->ioas_id = ioas_id;
> +
> +    ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info);
> +    if (ret && errno != EMSGSIZE) {
> +        goto error;
> +    }
> +
> +    sz = info->num_iovas * sizeof(struct iommu_iova_range);
> +    info = g_realloc(info, sizeof(*info) + sz);
> +    info->allowed_iovas = (uintptr_t)(info + 1);
> +
> +    ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info);
> +    if (ret) {
> +        goto error;
> +    }
> +
> +    iova_ranges = (struct iommu_iova_range *)(uintptr_t)info->allowed_iovas;
> +
> +    for (int i = 0; i < info->num_iovas; i++) {
> +        Range *range = g_new(Range, 1);
> +
> +        range_set_bounds(range, iova_ranges[i].start, iova_ranges[i].last);
> +        bcontainer->iova_ranges =
> +            range_list_insert(bcontainer->iova_ranges, range);
> +    }
> +    bcontainer->pgsizes = info->out_iova_alignment;
> +
> +    g_free(info);
> +    return 0;
> +
> +error:
> +    ret = -errno;
> +    g_free(info);
> +    error_report("vfio/iommufd: Cannot get iova ranges: %m");

Can we propagate the error?

Thanks,

C.


> +    return ret;
> +}
> +
>   static int iommufd_attach_device(const char *name, VFIODevice *vbasedev,
>                                    AddressSpace *as, Error **errp)
>   {
> @@ -343,6 +390,7 @@ static int iommufd_attach_device(const char *name, VFIODevice *vbasedev,
>       }
>   
>       bcontainer->pgsizes = qemu_real_host_page_size();
> +    vfio_get_info_iova_range(container, ioas_id);
>   
>       bcontainer->listener = vfio_memory_listener;
>       memory_listener_register(&bcontainer->listener, bcontainer->space->as);
Duan, Zhenzhong Nov. 10, 2023, 10:03 a.m. UTC | #2
Hi Cédric,

>-----Original Message-----
>From: Cédric Le Goater <clg@redhat.com>
>Sent: Friday, November 10, 2023 5:36 PM
>Subject: Re: [PATCH v5 05/20] vfio/iommufd: Add support for iova_ranges and pgsizes
>
>On 11/9/23 12:45, Zhenzhong Duan wrote:
>> Some vIOMMUs, such as virtio-iommu, use host-side IOVA ranges to set
>> up reserved ranges for a passthrough device, so that the guest will
>> not use an IOVA range beyond what the host supports.
>>
>> Use the IOMMUFD uAPI (IOMMU_IOAS_IOVA_RANGES) to retrieve the
>> host-side IOVA ranges and pass them to the vIOMMU, just as the legacy
>> backend does.
>>
>> Also use the out_iova_alignment returned by the uAPI as pgsizes,
>> keeping qemu_real_host_page_size() only as a fallback.
>>
>> Signed-off-by: Zhenzhong Duan <zhenzhong.duan@intel.com>
>> ---
>> v5: Add missing pgsizes initialization in vfio_get_info_iova_range
>>
>>   hw/vfio/iommufd.c | 48 +++++++++++++++++++++++++++++++++++++++++++++++
>>   1 file changed, 48 insertions(+)
>>
>> diff --git a/hw/vfio/iommufd.c b/hw/vfio/iommufd.c
>> index ea4e23f4ec..958c3e794f 100644
>> --- a/hw/vfio/iommufd.c
>> +++ b/hw/vfio/iommufd.c
>> @@ -267,6 +267,53 @@ static int iommufd_ram_block_discard_disable(bool state)
>>       return ram_block_uncoordinated_discard_disable(state);
>>   }
>>
>> +static int vfio_get_info_iova_range(VFIOIOMMUFDContainer *container,
>> +                                    uint32_t ioas_id)
>> +{
>> +    VFIOContainerBase *bcontainer = &container->bcontainer;
>> +    struct iommu_ioas_iova_ranges *info;
>> +    struct iommu_iova_range *iova_ranges;
>> +    int ret, sz, fd = container->be->fd;
>> +
>> +    info = g_malloc0(sizeof(*info));
>> +    info->size = sizeof(*info);
>> +    info->ioas_id = ioas_id;
>> +
>> +    ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info);
>> +    if (ret && errno != EMSGSIZE) {
>> +        goto error;
>> +    }
>> +
>> +    sz = info->num_iovas * sizeof(struct iommu_iova_range);
>> +    info = g_realloc(info, sizeof(*info) + sz);
>> +    info->allowed_iovas = (uintptr_t)(info + 1);
>> +
>> +    ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info);
>> +    if (ret) {
>> +        goto error;
>> +    }
>> +
>> +    iova_ranges = (struct iommu_iova_range *)(uintptr_t)info->allowed_iovas;
>> +
>> +    for (int i = 0; i < info->num_iovas; i++) {
>> +        Range *range = g_new(Range, 1);
>> +
>> +        range_set_bounds(range, iova_ranges[i].start, iova_ranges[i].last);
>> +        bcontainer->iova_ranges =
>> +            range_list_insert(bcontainer->iova_ranges, range);
>> +    }
>> +    bcontainer->pgsizes = info->out_iova_alignment;
>> +
>> +    g_free(info);
>> +    return 0;
>> +
>> +error:
>> +    ret = -errno;
>> +    g_free(info);
>> +    error_report("vfio/iommufd: Cannot get iova ranges: %m");
>
>Can we propagate the error?

Do you mean propagating the error to the call site and calling
error_report there? In fact, getting the IOVA ranges from the host is
nice to have, not a must. If it fails, we fall back to the full 64-bit
range.

Thanks
Zhenzhong
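
For illustration, if the error were to be propagated as suggested above,
one hypothetical shape (a sketch only, not part of this series; it assumes
QEMU's existing error_setg_errno() and warn_report_err() helpers) would be
an Error ** parameter, with the caller downgrading the failure to a warning
and keeping the existing fallback:

/* Hypothetical sketch -- not the code posted in this series. */
static int vfio_get_info_iova_range(VFIOIOMMUFDContainer *container,
                                    uint32_t ioas_id, Error **errp)
{
    /* ... body as in the patch ... */
error:
    ret = -errno;
    error_setg_errno(errp, errno, "Cannot get iova ranges");
    g_free(info);
    return ret;
}

/* In iommufd_attach_device(): keep the fallback, warn on failure. */
    Error *err = NULL;

    bcontainer->pgsizes = qemu_real_host_page_size();
    if (vfio_get_info_iova_range(container, ioas_id, &err)) {
        warn_report_err(err);   /* best effort, see discussion above */
    }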

Patch

diff --git a/hw/vfio/iommufd.c b/hw/vfio/iommufd.c
index ea4e23f4ec..958c3e794f 100644
--- a/hw/vfio/iommufd.c
+++ b/hw/vfio/iommufd.c
@@ -267,6 +267,53 @@  static int iommufd_ram_block_discard_disable(bool state)
     return ram_block_uncoordinated_discard_disable(state);
 }
 
+static int vfio_get_info_iova_range(VFIOIOMMUFDContainer *container,
+                                    uint32_t ioas_id)
+{
+    VFIOContainerBase *bcontainer = &container->bcontainer;
+    struct iommu_ioas_iova_ranges *info;
+    struct iommu_iova_range *iova_ranges;
+    int ret, sz, fd = container->be->fd;
+
+    info = g_malloc0(sizeof(*info));
+    info->size = sizeof(*info);
+    info->ioas_id = ioas_id;
+
+    ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info);
+    if (ret && errno != EMSGSIZE) {
+        goto error;
+    }
+
+    sz = info->num_iovas * sizeof(struct iommu_iova_range);
+    info = g_realloc(info, sizeof(*info) + sz);
+    info->allowed_iovas = (uintptr_t)(info + 1);
+
+    ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info);
+    if (ret) {
+        goto error;
+    }
+
+    iova_ranges = (struct iommu_iova_range *)(uintptr_t)info->allowed_iovas;
+
+    for (int i = 0; i < info->num_iovas; i++) {
+        Range *range = g_new(Range, 1);
+
+        range_set_bounds(range, iova_ranges[i].start, iova_ranges[i].last);
+        bcontainer->iova_ranges =
+            range_list_insert(bcontainer->iova_ranges, range);
+    }
+    bcontainer->pgsizes = info->out_iova_alignment;
+
+    g_free(info);
+    return 0;
+
+error:
+    ret = -errno;
+    g_free(info);
+    error_report("vfio/iommufd: Cannot get iova ranges: %m");
+    return ret;
+}
+
 static int iommufd_attach_device(const char *name, VFIODevice *vbasedev,
                                  AddressSpace *as, Error **errp)
 {
@@ -343,6 +390,7 @@  static int iommufd_attach_device(const char *name, VFIODevice *vbasedev,
     }
 
     bcontainer->pgsizes = qemu_real_host_page_size();
+    vfio_get_info_iova_range(container, ioas_id);
 
     bcontainer->listener = vfio_memory_listener;
     memory_listener_register(&bcontainer->listener, bcontainer->space->as);