Message ID | 1588533678-23450-8-git-send-email-raphael.norwitz@nutanix.com (mailing list archive) |
---|---|
State | New, archived |
Series | vhost-user: Lift Max Ram Slots Limitation |
On Thu, May 21, 2020 at 7:00 AM Raphael Norwitz <raphael.norwitz@nutanix.com> wrote:
> The VHOST_USER_GET_MAX_MEM_SLOTS message allows a vhost-user backend to
> specify a maximum number of ram slots it is willing to support. This
> change adds support for libvhost-user to process this message. For now
> the backend will reply with 8 as the maximum number of regions
> supported.
>
> libvhost-user does not yet support the vhost-user protocol feature
> VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS, so qemu should never
> send the VHOST_USER_GET_MAX_MEM_SLOTS message. Therefore this new
> functionality is not currently used.
>
> Signed-off-by: Raphael Norwitz <raphael.norwitz@nutanix.com>
> Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
> ---
>  contrib/libvhost-user/libvhost-user.c | 19 +++++++++++++++++++
>  contrib/libvhost-user/libvhost-user.h |  1 +
>  2 files changed, 20 insertions(+)
>
> diff --git a/contrib/libvhost-user/libvhost-user.c
> b/contrib/libvhost-user/libvhost-user.c
> index cccfa22..9f039b7 100644
> --- a/contrib/libvhost-user/libvhost-user.c
> +++ b/contrib/libvhost-user/libvhost-user.c
> @@ -137,6 +137,7 @@ vu_request_to_string(unsigned int req)
>          REQ(VHOST_USER_SET_INFLIGHT_FD),
>          REQ(VHOST_USER_GPU_SET_SOCKET),
>          REQ(VHOST_USER_VRING_KICK),
> +        REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
>          REQ(VHOST_USER_MAX),
>      };
>  #undef REQ
> @@ -1565,6 +1566,22 @@ vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg)
>      return false;
>  }
>
> +static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg)
> +{
> +    vmsg->flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION;
> +    vmsg->size = sizeof(vmsg->payload.u64);
> +    vmsg->payload.u64 = VHOST_MEMORY_MAX_NREGIONS;
> +    vmsg->fd_num = 0;
> +
> +    if (!vu_message_write(dev, dev->sock, vmsg)) {
> +        vu_panic(dev, "Failed to send max ram slots: %s\n", strerror(errno));
> +    }
> +
> +    DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_MEMORY_MAX_NREGIONS);
> +
> +    return false;
> +}
> +
>  static bool
>  vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
>  {
> @@ -1649,6 +1666,8 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
>          return vu_set_inflight_fd(dev, vmsg);
>      case VHOST_USER_VRING_KICK:
>          return vu_handle_vring_kick(dev, vmsg);
> +    case VHOST_USER_GET_MAX_MEM_SLOTS:
> +        return vu_handle_get_max_memslots(dev, vmsg);
>      default:
>          vmsg_close_fds(vmsg);
>          vu_panic(dev, "Unhandled request: %d", vmsg->request);
> diff --git a/contrib/libvhost-user/libvhost-user.h
> b/contrib/libvhost-user/libvhost-user.h
> index f30394f..88ef40d 100644
> --- a/contrib/libvhost-user/libvhost-user.h
> +++ b/contrib/libvhost-user/libvhost-user.h
> @@ -97,6 +97,7 @@ typedef enum VhostUserRequest {
>      VHOST_USER_SET_INFLIGHT_FD = 32,
>      VHOST_USER_GPU_SET_SOCKET = 33,
>      VHOST_USER_VRING_KICK = 35,
> +    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
>      VHOST_USER_MAX
>  } VhostUserRequest;
>
> --
> 1.8.3.1
>
>
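For reference, here is roughly what the other end of that exchange looks like. This is not QEMU's implementation; it is a minimal, hypothetical front-end sketch that speaks the vhost-user wire format directly (a 12-byte header of request/flags/size followed by the payload), assuming `sock` is an already-connected vhost-user unix socket and that the constants match the patch above.

```c
/* Hypothetical front-end side of VHOST_USER_GET_MAX_MEM_SLOTS: send the
 * request with an empty payload, then read back the header and the u64
 * answer (8 with this patch).  Constants follow the vhost-user wire
 * format; error handling is reduced to a single failure path. */
#include <stdint.h>
#include <unistd.h>

#define VHOST_USER_GET_MAX_MEM_SLOTS 36
#define VHOST_USER_VERSION           0x1
#define VHOST_USER_REPLY_MASK        (1u << 2)

struct vhost_user_hdr {
    uint32_t request;
    uint32_t flags;
    uint32_t size;              /* payload size in bytes */
} __attribute__((packed));

static int query_max_mem_slots(int sock, uint64_t *slots)
{
    struct vhost_user_hdr hdr = {
        .request = VHOST_USER_GET_MAX_MEM_SLOTS,
        .flags   = VHOST_USER_VERSION,
        .size    = 0,           /* the request itself carries no payload */
    };
    uint64_t payload;

    if (write(sock, &hdr, sizeof(hdr)) != sizeof(hdr)) {
        return -1;
    }
    /* The backend echoes the request id with REPLY_MASK set and appends
     * a single u64 payload. */
    if (read(sock, &hdr, sizeof(hdr)) != sizeof(hdr) ||
        !(hdr.flags & VHOST_USER_REPLY_MASK) ||
        hdr.size != sizeof(payload) ||
        read(sock, &payload, sizeof(payload)) != sizeof(payload)) {
        return -1;
    }
    *slots = payload;
    return 0;
}
```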
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index cccfa22..9f039b7 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -137,6 +137,7 @@ vu_request_to_string(unsigned int req)
         REQ(VHOST_USER_SET_INFLIGHT_FD),
         REQ(VHOST_USER_GPU_SET_SOCKET),
         REQ(VHOST_USER_VRING_KICK),
+        REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
         REQ(VHOST_USER_MAX),
     };
 #undef REQ
@@ -1565,6 +1566,22 @@ vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg)
     return false;
 }
 
+static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg)
+{
+    vmsg->flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION;
+    vmsg->size = sizeof(vmsg->payload.u64);
+    vmsg->payload.u64 = VHOST_MEMORY_MAX_NREGIONS;
+    vmsg->fd_num = 0;
+
+    if (!vu_message_write(dev, dev->sock, vmsg)) {
+        vu_panic(dev, "Failed to send max ram slots: %s\n", strerror(errno));
+    }
+
+    DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_MEMORY_MAX_NREGIONS);
+
+    return false;
+}
+
 static bool
 vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
 {
@@ -1649,6 +1666,8 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
         return vu_set_inflight_fd(dev, vmsg);
     case VHOST_USER_VRING_KICK:
         return vu_handle_vring_kick(dev, vmsg);
+    case VHOST_USER_GET_MAX_MEM_SLOTS:
+        return vu_handle_get_max_memslots(dev, vmsg);
     default:
         vmsg_close_fds(vmsg);
         vu_panic(dev, "Unhandled request: %d", vmsg->request);
diff --git a/contrib/libvhost-user/libvhost-user.h b/contrib/libvhost-user/libvhost-user.h
index f30394f..88ef40d 100644
--- a/contrib/libvhost-user/libvhost-user.h
+++ b/contrib/libvhost-user/libvhost-user.h
@@ -97,6 +97,7 @@ typedef enum VhostUserRequest {
     VHOST_USER_SET_INFLIGHT_FD = 32,
     VHOST_USER_GPU_SET_SOCKET = 33,
     VHOST_USER_VRING_KICK = 35,
+    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
     VHOST_USER_MAX
 } VhostUserRequest;
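Concretely, the reply that vu_handle_get_max_memslots() fills in above serialises to the standard vhost-user header followed by one u64. The struct below only illustrates that byte layout; the name and fields are invented here, and libvhost-user's own VhostUserMsg additionally tracks ancillary file descriptors, none of which are sent for this message.

```c
#include <stdint.h>

/* Illustrative on-the-wire layout of the GET_MAX_MEM_SLOTS reply built by
 * the handler above (20 bytes total). */
struct max_mem_slots_reply {
    uint32_t request;    /* VHOST_USER_GET_MAX_MEM_SLOTS (36), echoed back */
    uint32_t flags;      /* VHOST_USER_REPLY_MASK | VHOST_USER_VERSION */
    uint32_t size;       /* sizeof(uint64_t) == 8 */
    uint64_t max_slots;  /* VHOST_MEMORY_MAX_NREGIONS, i.e. 8 for now */
} __attribute__((packed));
```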
The VHOST_USER_GET_MAX_MEM_SLOTS message allows a vhost-user backend to
specify a maximum number of ram slots it is willing to support. This
change adds support for libvhost-user to process this message. For now
the backend will reply with 8 as the maximum number of regions
supported.

libvhost-user does not yet support the vhost-user protocol feature
VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS, so qemu should never
send the VHOST_USER_GET_MAX_MEM_SLOTS message. Therefore this new
functionality is not currently used.

Signed-off-by: Raphael Norwitz <raphael.norwitz@nutanix.com>
---
 contrib/libvhost-user/libvhost-user.c | 19 +++++++++++++++++++
 contrib/libvhost-user/libvhost-user.h |  1 +
 2 files changed, 20 insertions(+)
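Once a front-end has that reply, one plausible way to use it is as an upper bound when registering memory regions. The helper below is purely hypothetical (its name and parameters are invented for illustration) and is not part of this series:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical front-end check: allow adding another memory region only
 * while the backend's advertised slot count (8 with this patch) is not
 * exhausted. */
static bool can_add_mem_region(uint64_t backend_max_slots, unsigned used_regions)
{
    return (uint64_t)used_regions < backend_max_slots;
}
```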