
[25/40] vdpa: add asid to dma_batch_once API

Message ID 1701970793-6865-26-git-send-email-si-wei.liu@oracle.com (mailing list archive)
State New, archived
Series vdpa-net: improve migration downtime through descriptor ASID and persistent IOTLB

Commit Message

Si-Wei Liu Dec. 7, 2023, 5:39 p.m. UTC
So that the DMA batching API can operate on an ASID other than 0.

Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
---
 hw/virtio/trace-events |  4 ++--
 hw/virtio/vhost-vdpa.c | 14 ++++++++------
 2 files changed, 10 insertions(+), 8 deletions(-)

Comments

Eugenio Perez Martin Dec. 13, 2023, 3:42 p.m. UTC | #1
On Thu, Dec 7, 2023 at 7:51 PM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> So that DMA batching API can operate on other ASID than 0.
>
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
>  hw/virtio/trace-events |  4 ++--
>  hw/virtio/vhost-vdpa.c | 14 ++++++++------
>  2 files changed, 10 insertions(+), 8 deletions(-)
>
> diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
> index 3411a07..196f32f 100644
> --- a/hw/virtio/trace-events
> +++ b/hw/virtio/trace-events
> @@ -32,8 +32,8 @@ vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p"
>  # vhost-vdpa.c
>  vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
>  vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
> -vhost_vdpa_map_batch_begin(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
> -vhost_vdpa_dma_batch_end(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
> +vhost_vdpa_map_batch_begin(void *v, int fd, uint32_t msg_type, uint8_t type, uint32_t asid)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8" asid: %"PRIu32
> +vhost_vdpa_dma_batch_end(void *v, int fd, uint32_t msg_type, uint8_t type, uint32_t asid)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8" asid: %"PRIu32
>  vhost_vdpa_listener_region_add_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa_shared: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64
>  vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
>  vhost_vdpa_listener_region_del_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa_shared: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 999a97a..2db2832 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -161,11 +161,12 @@ int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
>      return ret;
>  }
>
> -static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s)
> +static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s, uint32_t asid)
>  {
>      int fd = s->device_fd;
>      struct vhost_msg_v2 msg = {
>          .type = VHOST_IOTLB_MSG_V2,
> +        .asid = asid,
>          .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
>      };
>
> @@ -178,7 +179,7 @@ static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s)
>          return false;
>      }
>
> -    trace_vhost_vdpa_map_batch_begin(s, fd, msg.type, msg.iotlb.type);
> +    trace_vhost_vdpa_map_batch_begin(s, fd, msg.type, msg.iotlb.type, msg.asid);
>      if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
>          error_report("failed to write, fd=%d, errno=%d (%s)",
>                       fd, errno, strerror(errno));
> @@ -193,17 +194,18 @@ static void vhost_vdpa_dma_batch_begin_once(VhostVDPAShared *s)
>          return;
>      }
>
> -    if (vhost_vdpa_map_batch_begin(s)) {
> +    if (vhost_vdpa_map_batch_begin(s, 0)) {
>          s->iotlb_batch_begin_sent = true;
>      }
>  }
>
> -static bool vhost_vdpa_dma_batch_end(VhostVDPAShared *s)
> +static bool vhost_vdpa_dma_batch_end(VhostVDPAShared *s, uint32_t asid)

Maybe adding the asid parameter is not needed? We already have it in
s->asid by the end of the series, and the kernel will also complain if
the wrong asid is sent.

Actually, dma_map and dma_unmap have the asid parameter because they
can map outside of a batch, but I think there are no IOTLB operations
outside of a batch by the end of the series, are there?

Thanks!

>  {
>      struct vhost_msg_v2 msg = {};
>      int fd = s->device_fd;
>
>      msg.type = VHOST_IOTLB_MSG_V2;
> +    msg.asid = asid;
>      msg.iotlb.type = VHOST_IOTLB_BATCH_END;
>
>      if (s->map_thread_enabled && !qemu_thread_is_self(&s->map_thread)) {
> @@ -215,7 +217,7 @@ static bool vhost_vdpa_dma_batch_end(VhostVDPAShared *s)
>          return false;
>      }
>
> -    trace_vhost_vdpa_dma_batch_end(s, fd, msg.type, msg.iotlb.type);
> +    trace_vhost_vdpa_dma_batch_end(s, fd, msg.type, msg.iotlb.type, msg.asid);
>      if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
>          error_report("failed to write, fd=%d, errno=%d (%s)",
>                       fd, errno, strerror(errno));
> @@ -233,7 +235,7 @@ static void vhost_vdpa_dma_batch_end_once(VhostVDPAShared *s)
>          return;
>      }
>
> -    if (vhost_vdpa_dma_batch_end(s)) {
> +    if (vhost_vdpa_dma_batch_end(s, 0)) {
>          s->iotlb_batch_begin_sent = false;
>      }
>  }
> --
> 1.8.3.1
>
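A minimal sketch of the alternative suggested above, assuming
VhostVDPAShared carries the ASID in an s->asid field by the end of the
series (as the comment notes); this is not part of the posted patch, and
the map-thread handling from the patch is omitted for brevity:

static bool vhost_vdpa_dma_batch_end(VhostVDPAShared *s)
{
    int fd = s->device_fd;
    struct vhost_msg_v2 msg = {
        .type = VHOST_IOTLB_MSG_V2,
        .asid = s->asid,                      /* assumed field, see above */
        .iotlb.type = VHOST_IOTLB_BATCH_END,
    };

    trace_vhost_vdpa_dma_batch_end(s, fd, msg.type, msg.iotlb.type, msg.asid);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return false;
    }

    return true;
}

With that shape, the _once() wrappers would call vhost_vdpa_dma_batch_end(s)
directly instead of passing an explicit 0.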
Jason Wang Jan. 15, 2024, 3:07 a.m. UTC | #2
On Fri, Dec 8, 2023 at 2:51 AM Si-Wei Liu <si-wei.liu@oracle.com> wrote:
>
> So that DMA batching API can operate on other ASID than 0.
>
> Signed-off-by: Si-Wei Liu <si-wei.liu@oracle.com>
> ---
>  hw/virtio/trace-events |  4 ++--
>  hw/virtio/vhost-vdpa.c | 14 ++++++++------
>  2 files changed, 10 insertions(+), 8 deletions(-)
>
> diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
> index 3411a07..196f32f 100644
> --- a/hw/virtio/trace-events
> +++ b/hw/virtio/trace-events
> @@ -32,8 +32,8 @@ vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p"
>  # vhost-vdpa.c
>  vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
>  vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
> -vhost_vdpa_map_batch_begin(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
> -vhost_vdpa_dma_batch_end(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
> +vhost_vdpa_map_batch_begin(void *v, int fd, uint32_t msg_type, uint8_t type, uint32_t asid)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8" asid: %"PRIu32
> +vhost_vdpa_dma_batch_end(void *v, int fd, uint32_t msg_type, uint8_t type, uint32_t asid)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8" asid: %"PRIu32
>  vhost_vdpa_listener_region_add_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa_shared: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64
>  vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
>  vhost_vdpa_listener_region_del_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa_shared: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64
> diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
> index 999a97a..2db2832 100644
> --- a/hw/virtio/vhost-vdpa.c
> +++ b/hw/virtio/vhost-vdpa.c
> @@ -161,11 +161,12 @@ int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
>      return ret;
>  }
>
> -static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s)
> +static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s, uint32_t asid)
>  {
>      int fd = s->device_fd;
>      struct vhost_msg_v2 msg = {
>          .type = VHOST_IOTLB_MSG_V2,
> +        .asid = asid,

I wonder if we need a check for the case where vhost doesn't support ASID but asid is not zero?

Thanks
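
One possible shape for the check raised above, reusing the
VHOST_BACKEND_F_IOTLB_ASID bit from the vhost uAPI; the backend_cap
field on VhostVDPAShared is an assumption here (the negotiated backend
features may live elsewhere in the series), so this is only a sketch,
not part of the patch:

static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s, uint32_t asid)
{
    int fd = s->device_fd;
    struct vhost_msg_v2 msg = {
        .type = VHOST_IOTLB_MSG_V2,
        .asid = asid,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    /* Reject a non-zero ASID when the backend never offered ASID support. */
    if (asid != 0 &&
        !(s->backend_cap & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
        error_report("vhost-vdpa: asid %" PRIu32 " requested without "
                     "VHOST_BACKEND_F_IOTLB_ASID support", asid);
        return false;
    }

    /* ... map-thread handling as in the patch below ... */

    trace_vhost_vdpa_map_batch_begin(s, fd, msg.type, msg.iotlb.type, msg.asid);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
        return false;
    }

    return true;
}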

Patch

diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 3411a07..196f32f 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -32,8 +32,8 @@  vhost_user_create_notifier(int idx, void *n) "idx:%d n:%p"
 # vhost-vdpa.c
 vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8
 vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa_shared:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8
-vhost_vdpa_map_batch_begin(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
-vhost_vdpa_dma_batch_end(void *v, int fd, uint32_t msg_type, uint8_t type)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8
+vhost_vdpa_map_batch_begin(void *v, int fd, uint32_t msg_type, uint8_t type, uint32_t asid)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8" asid: %"PRIu32
+vhost_vdpa_dma_batch_end(void *v, int fd, uint32_t msg_type, uint8_t type, uint32_t asid)  "vdpa_shared:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8" asid: %"PRIu32
 vhost_vdpa_listener_region_add_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa_shared: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64
 vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d"
 vhost_vdpa_listener_region_del_unaligned(void *v, const char *name, uint64_t offset_as, uint64_t offset_page) "vdpa_shared: %p region %s offset_within_address_space %"PRIu64" offset_within_region %"PRIu64
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 999a97a..2db2832 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -161,11 +161,12 @@  int vhost_vdpa_dma_unmap(VhostVDPAShared *s, uint32_t asid, hwaddr iova,
     return ret;
 }
 
-static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s)
+static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s, uint32_t asid)
 {
     int fd = s->device_fd;
     struct vhost_msg_v2 msg = {
         .type = VHOST_IOTLB_MSG_V2,
+        .asid = asid,
         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
     };
 
@@ -178,7 +179,7 @@  static bool vhost_vdpa_map_batch_begin(VhostVDPAShared *s)
         return false;
     }
 
-    trace_vhost_vdpa_map_batch_begin(s, fd, msg.type, msg.iotlb.type);
+    trace_vhost_vdpa_map_batch_begin(s, fd, msg.type, msg.iotlb.type, msg.asid);
     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
         error_report("failed to write, fd=%d, errno=%d (%s)",
                      fd, errno, strerror(errno));
@@ -193,17 +194,18 @@  static void vhost_vdpa_dma_batch_begin_once(VhostVDPAShared *s)
         return;
     }
 
-    if (vhost_vdpa_map_batch_begin(s)) {
+    if (vhost_vdpa_map_batch_begin(s, 0)) {
         s->iotlb_batch_begin_sent = true;
     }
 }
 
-static bool vhost_vdpa_dma_batch_end(VhostVDPAShared *s)
+static bool vhost_vdpa_dma_batch_end(VhostVDPAShared *s, uint32_t asid)
 {
     struct vhost_msg_v2 msg = {};
     int fd = s->device_fd;
 
     msg.type = VHOST_IOTLB_MSG_V2;
+    msg.asid = asid;
     msg.iotlb.type = VHOST_IOTLB_BATCH_END;
 
     if (s->map_thread_enabled && !qemu_thread_is_self(&s->map_thread)) {
@@ -215,7 +217,7 @@  static bool vhost_vdpa_dma_batch_end(VhostVDPAShared *s)
         return false;
     }
 
-    trace_vhost_vdpa_dma_batch_end(s, fd, msg.type, msg.iotlb.type);
+    trace_vhost_vdpa_dma_batch_end(s, fd, msg.type, msg.iotlb.type, msg.asid);
     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
         error_report("failed to write, fd=%d, errno=%d (%s)",
                      fd, errno, strerror(errno));
@@ -233,7 +235,7 @@  static void vhost_vdpa_dma_batch_end_once(VhostVDPAShared *s)
         return;
     }
 
-    if (vhost_vdpa_dma_batch_end(s)) {
+    if (vhost_vdpa_dma_batch_end(s, 0)) {
         s->iotlb_batch_begin_sent = false;
     }
 }