[v3,12/29] postcopy+vhost-user: Split set_mem_table for postcopy

Message ID 20180216131625.9639-13-dgilbert@redhat.com (mailing list archive)
State New, archived

Commit Message

Dr. David Alan Gilbert Feb. 16, 2018, 1:16 p.m. UTC
From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>

Split the set_mem_table routines in both qemu and libvhost-user
because the postcopy versions are going to be quite different
once the changes in later patches are added.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
---
 contrib/libvhost-user/libvhost-user.c | 53 ++++++++++++++++++++++++
 hw/virtio/vhost-user.c                | 77 ++++++++++++++++++++++++++++++++++-
 2 files changed, 128 insertions(+), 2 deletions(-)
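
In outline, both handlers gain an early check that diverts to a postcopy-specific
copy of the routine. A condensed sketch of the dispatch shape this patch introduces
on the qemu side (names taken from the diff below; not a drop-in replacement for it):

    static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                        struct vhost_memory *mem)
    {
        struct vhost_user *u = dev->opaque;
        /* Postcopy is in effect once the listen phase has started and a
         * postcopy fd handler is registered. */
        bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;

        if (do_postcopy) {
            /* Identical behaviour for now; diverges in later patches. */
            return vhost_user_set_mem_table_postcopy(dev, mem);
        }
        /* ... the existing non-postcopy path continues unchanged ... */
    }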

Comments

Peter Xu Feb. 28, 2018, 8:49 a.m. UTC | #1
On Fri, Feb 16, 2018 at 01:16:08PM +0000, Dr. David Alan Gilbert (git) wrote:
> From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
> 
> Split the set_mem_table routines in both qemu and libvhost-user
> because the postcopy versions are going to be quite different
> once the changes in later patches are added.
> 
> Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
> ---
>  contrib/libvhost-user/libvhost-user.c | 53 ++++++++++++++++++++++++
>  hw/virtio/vhost-user.c                | 77 ++++++++++++++++++++++++++++++++++-
>  2 files changed, 128 insertions(+), 2 deletions(-)
> 
> diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
> index beec7695a8..4922b2c722 100644
> --- a/contrib/libvhost-user/libvhost-user.c
> +++ b/contrib/libvhost-user/libvhost-user.c
> @@ -448,6 +448,55 @@ vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
>      return false;
>  }
>  
> +static bool
> +vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
> +{
> +    int i;
> +    VhostUserMemory *memory = &vmsg->payload.memory;
> +    dev->nregions = memory->nregions;
> +    /* TODO: Postcopy specific code */
> +    DPRINT("Nregions: %d\n", memory->nregions);
> +    for (i = 0; i < dev->nregions; i++) {
> +        void *mmap_addr;
> +        VhostUserMemoryRegion *msg_region = &memory->regions[i];
> +        VuDevRegion *dev_region = &dev->regions[i];
> +
> +        DPRINT("Region %d\n", i);
> +        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
> +               msg_region->guest_phys_addr);
> +        DPRINT("    memory_size:     0x%016"PRIx64"\n",
> +               msg_region->memory_size);
> +        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
> +               msg_region->userspace_addr);
> +        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
> +               msg_region->mmap_offset);
> +
> +        dev_region->gpa = msg_region->guest_phys_addr;
> +        dev_region->size = msg_region->memory_size;
> +        dev_region->qva = msg_region->userspace_addr;
> +        dev_region->mmap_offset = msg_region->mmap_offset;
> +
> +        /* We don't use the offset argument of mmap() since the
> +         * mapped address has to be page aligned, and we use huge
> +         * pages.  */
> +        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
> +                         PROT_READ | PROT_WRITE, MAP_SHARED,
> +                         vmsg->fds[i], 0);
> +
> +        if (mmap_addr == MAP_FAILED) {
> +            vu_panic(dev, "region mmap error: %s", strerror(errno));
> +        } else {
> +            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
> +            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
> +                   dev_region->mmap_addr);
> +        }
> +
> +        close(vmsg->fds[i]);
> +    }
> +
> +    return false;
> +}
> +
>  static bool
>  vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
>  {
> @@ -464,6 +513,10 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
>      }
>      dev->nregions = memory->nregions;
>  
> +    if (dev->postcopy_listening) {
> +        return vu_set_mem_table_exec_postcopy(dev, vmsg);
> +    }
> +
>      DPRINT("Nregions: %d\n", memory->nregions);
>      for (i = 0; i < dev->nregions; i++) {
>          void *mmap_addr;
> diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
> index ec6a4a82fd..64f4b3b3f9 100644
> --- a/hw/virtio/vhost-user.c
> +++ b/hw/virtio/vhost-user.c
> @@ -325,15 +325,86 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
>      return 0;
>  }
>  
> +static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
> +                                             struct vhost_memory *mem)
> +{
> +    int fds[VHOST_MEMORY_MAX_NREGIONS];
> +    int i, fd;
> +    size_t fd_num = 0;
> +    bool reply_supported = virtio_has_feature(dev->protocol_features,
> +                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
> +    /* TODO: Add actual postcopy differences */
> +    VhostUserMsg msg = {
> +        .hdr.request = VHOST_USER_SET_MEM_TABLE,
> +        .hdr.flags = VHOST_USER_VERSION,
> +    };
> +
> +    if (reply_supported) {
> +        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
> +    }
> +
> +    for (i = 0; i < dev->mem->nregions; ++i) {
> +        struct vhost_memory_region *reg = dev->mem->regions + i;
> +        ram_addr_t offset;
> +        MemoryRegion *mr;
> +
> +        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
> +        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
> +                                     &offset);
> +        fd = memory_region_get_fd(mr);
> +        if (fd > 0) {
> +            msg.payload.memory.regions[fd_num].userspace_addr =
> +                reg->userspace_addr;
> +            msg.payload.memory.regions[fd_num].memory_size  = reg->memory_size;
> +            msg.payload.memory.regions[fd_num].guest_phys_addr =
> +                reg->guest_phys_addr;
> +            msg.payload.memory.regions[fd_num].mmap_offset = offset;
> +            assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
> +            fds[fd_num++] = fd;
> +        }
> +    }
> +
> +    msg.payload.memory.nregions = fd_num;
> +
> +    if (!fd_num) {
> +        error_report("Failed initializing vhost-user memory map, "
> +                     "consider using -object memory-backend-file share=on");
> +        return -1;
> +    }
> +
> +    msg.hdr.size = sizeof(msg.payload.memory.nregions);
> +    msg.hdr.size += sizeof(msg.payload.memory.padding);
> +    msg.hdr.size += fd_num * sizeof(VhostUserMemoryRegion);
> +
> +    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
> +        return -1;
> +    }
> +
> +    if (reply_supported) {
> +        return process_message_reply(dev, &msg);
> +    }
> +
> +    return 0;
> +}
> +
>  static int vhost_user_set_mem_table(struct vhost_dev *dev,
>                                      struct vhost_memory *mem)
>  {
> +    struct vhost_user *u = dev->opaque;
>      int fds[VHOST_MEMORY_MAX_NREGIONS];
>      int i, fd;
>      size_t fd_num = 0;
> +    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
>      bool reply_supported = virtio_has_feature(dev->protocol_features,
>                                                VHOST_USER_PROTOCOL_F_REPLY_ACK);
>  
> +    if (do_postcopy) {
> +        /* Postcopy has enough differences that it's best done in its own
> +         * version
> +         */
> +        return vhost_user_set_mem_table_postcopy(dev, mem);
> +    }
> +
>      VhostUserMsg msg = {
>          .hdr.request = VHOST_USER_SET_MEM_TABLE,
>          .hdr.flags = VHOST_USER_VERSION,
> @@ -357,9 +428,11 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
>                  error_report("Failed preparing vhost-user memory table msg");
>                  return -1;
>              }
> -            msg.payload.memory.regions[fd_num].userspace_addr = reg->userspace_addr;
> +            msg.payload.memory.regions[fd_num].userspace_addr =
> +                reg->userspace_addr;
>              msg.payload.memory.regions[fd_num].memory_size  = reg->memory_size;
> -            msg.payload.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr;
> +            msg.payload.memory.regions[fd_num].guest_phys_addr =
> +                reg->guest_phys_addr;

These newline changes might be avoided?

So after this patch there's no functional change, only the code
splitting of the set_mem_table operation, right?

Thanks,

Dr. David Alan Gilbert March 5, 2018, 6:45 p.m. UTC | #2
* Peter Xu (peterx@redhat.com) wrote:
> On Fri, Feb 16, 2018 at 01:16:08PM +0000, Dr. David Alan Gilbert (git) wrote:
> > From: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
> >
> > Split the set_mem_table routines in both qemu and libvhost-user
> > because the postcopy versions are going to be quite different
> > once the changes in later patches are added.
> >
> > [...]
> >
> > -            msg.payload.memory.regions[fd_num].userspace_addr = reg->userspace_addr;
> > +            msg.payload.memory.regions[fd_num].userspace_addr =
> > +                reg->userspace_addr;
> >              msg.payload.memory.regions[fd_num].memory_size  = reg->memory_size;
> > -            msg.payload.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr;
> > +            msg.payload.memory.regions[fd_num].guest_phys_addr =
> > +                reg->guest_phys_addr;
> 
> These newline changes might be avoided?

They could, but they're over 80 chars long, so while I was taking a copy
of the code I fixed the style on this copy so they were consistent.

> So after this patch there's no functional change, only the code
> splitting of the set_mem_table operation, right?

Right; the changes to the postcopy version come later.

Dave

--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
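
An aside on the mmap() comment in the patch: the offset argument of mmap() must be
a multiple of the backing page size, which for hugetlbfs-backed guest RAM can be
2MiB or 1GiB, so an arbitrary mmap_offset can't be passed through directly. The
code therefore maps the whole file from offset 0 and applies mmap_offset in
userspace. A minimal sketch of that translation, reusing the field names from the
patch (illustrative only; fd stands in for vmsg->fds[i]):

    /* Map from file offset 0 so mmap() never sees an unaligned offset;
     * dev_region->mmap_offset is applied after the fact. */
    void *mmap_addr = mmap(NULL, dev_region->size + dev_region->mmap_offset,
                           PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (mmap_addr != MAP_FAILED) {
        /* First usable byte of the region within the mapping: */
        void *region_base = (uint8_t *)mmap_addr + dev_region->mmap_offset;
    }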

Patch

diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index beec7695a8..4922b2c722 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -448,6 +448,55 @@ vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
     return false;
 }
 
+static bool
+vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
+{
+    int i;
+    VhostUserMemory *memory = &vmsg->payload.memory;
+    dev->nregions = memory->nregions;
+    /* TODO: Postcopy specific code */
+    DPRINT("Nregions: %d\n", memory->nregions);
+    for (i = 0; i < dev->nregions; i++) {
+        void *mmap_addr;
+        VhostUserMemoryRegion *msg_region = &memory->regions[i];
+        VuDevRegion *dev_region = &dev->regions[i];
+
+        DPRINT("Region %d\n", i);
+        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
+               msg_region->guest_phys_addr);
+        DPRINT("    memory_size:     0x%016"PRIx64"\n",
+               msg_region->memory_size);
+        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
+               msg_region->userspace_addr);
+        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
+               msg_region->mmap_offset);
+
+        dev_region->gpa = msg_region->guest_phys_addr;
+        dev_region->size = msg_region->memory_size;
+        dev_region->qva = msg_region->userspace_addr;
+        dev_region->mmap_offset = msg_region->mmap_offset;
+
+        /* We don't use the offset argument of mmap() since the
+         * mapped address has to be page aligned, and we use huge
+         * pages.  */
+        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
+                         PROT_READ | PROT_WRITE, MAP_SHARED,
+                         vmsg->fds[i], 0);
+
+        if (mmap_addr == MAP_FAILED) {
+            vu_panic(dev, "region mmap error: %s", strerror(errno));
+        } else {
+            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
+            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
+                   dev_region->mmap_addr);
+        }
+
+        close(vmsg->fds[i]);
+    }
+
+    return false;
+}
+
 static bool
 vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
 {
@@ -464,6 +513,10 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
     }
     dev->nregions = memory->nregions;
 
+    if (dev->postcopy_listening) {
+        return vu_set_mem_table_exec_postcopy(dev, vmsg);
+    }
+
     DPRINT("Nregions: %d\n", memory->nregions);
     for (i = 0; i < dev->nregions; i++) {
         void *mmap_addr;
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index ec6a4a82fd..64f4b3b3f9 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -325,15 +325,86 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
     return 0;
 }
 
+static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
+                                             struct vhost_memory *mem)
+{
+    int fds[VHOST_MEMORY_MAX_NREGIONS];
+    int i, fd;
+    size_t fd_num = 0;
+    bool reply_supported = virtio_has_feature(dev->protocol_features,
+                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
+    /* TODO: Add actual postcopy differences */
+    VhostUserMsg msg = {
+        .hdr.request = VHOST_USER_SET_MEM_TABLE,
+        .hdr.flags = VHOST_USER_VERSION,
+    };
+
+    if (reply_supported) {
+        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
+    }
+
+    for (i = 0; i < dev->mem->nregions; ++i) {
+        struct vhost_memory_region *reg = dev->mem->regions + i;
+        ram_addr_t offset;
+        MemoryRegion *mr;
+
+        assert((uintptr_t)reg->userspace_addr == reg->userspace_addr);
+        mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr,
+                                     &offset);
+        fd = memory_region_get_fd(mr);
+        if (fd > 0) {
+            msg.payload.memory.regions[fd_num].userspace_addr =
+                reg->userspace_addr;
+            msg.payload.memory.regions[fd_num].memory_size  = reg->memory_size;
+            msg.payload.memory.regions[fd_num].guest_phys_addr =
+                reg->guest_phys_addr;
+            msg.payload.memory.regions[fd_num].mmap_offset = offset;
+            assert(fd_num < VHOST_MEMORY_MAX_NREGIONS);
+            fds[fd_num++] = fd;
+        }
+    }
+
+    msg.payload.memory.nregions = fd_num;
+
+    if (!fd_num) {
+        error_report("Failed initializing vhost-user memory map, "
+                     "consider using -object memory-backend-file share=on");
+        return -1;
+    }
+
+    msg.hdr.size = sizeof(msg.payload.memory.nregions);
+    msg.hdr.size += sizeof(msg.payload.memory.padding);
+    msg.hdr.size += fd_num * sizeof(VhostUserMemoryRegion);
+
+    if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
+        return -1;
+    }
+
+    if (reply_supported) {
+        return process_message_reply(dev, &msg);
+    }
+
+    return 0;
+}
+
 static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                     struct vhost_memory *mem)
 {
+    struct vhost_user *u = dev->opaque;
     int fds[VHOST_MEMORY_MAX_NREGIONS];
     int i, fd;
     size_t fd_num = 0;
+    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
     bool reply_supported = virtio_has_feature(dev->protocol_features,
                                               VHOST_USER_PROTOCOL_F_REPLY_ACK);
 
+    if (do_postcopy) {
+        /* Postcopy has enough differences that it's best done in its own
+         * version
+         */
+        return vhost_user_set_mem_table_postcopy(dev, mem);
+    }
+
     VhostUserMsg msg = {
         .hdr.request = VHOST_USER_SET_MEM_TABLE,
         .hdr.flags = VHOST_USER_VERSION,
@@ -357,9 +428,11 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev,
                 error_report("Failed preparing vhost-user memory table msg");
                 return -1;
             }
-            msg.payload.memory.regions[fd_num].userspace_addr = reg->userspace_addr;
+            msg.payload.memory.regions[fd_num].userspace_addr =
+                reg->userspace_addr;
             msg.payload.memory.regions[fd_num].memory_size  = reg->memory_size;
-            msg.payload.memory.regions[fd_num].guest_phys_addr = reg->guest_phys_addr;
+            msg.payload.memory.regions[fd_num].guest_phys_addr =
+                reg->guest_phys_addr;
             msg.payload.memory.regions[fd_num].mmap_offset = offset;
             fds[fd_num++] = fd;
         }
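
For context on how the region fds reach the backend: vhost_user_write() hands them
to the chardev layer so they travel as SCM_RIGHTS ancillary data alongside the
VHOST_USER_SET_MEM_TABLE message on the vhost-user unix socket. A self-contained
sketch of that mechanism in generic POSIX terms (not QEMU's actual implementation;
send_with_fds() is a made-up helper name):

    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    /* Send a message plus up to 8 file descriptors over a unix socket
     * using SCM_RIGHTS ancillary data. */
    static int send_with_fds(int sock, const void *buf, size_t len,
                             const int *fds, size_t fd_num)
    {
        struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
        char control[CMSG_SPACE(8 * sizeof(int))];
        struct msghdr mh = {
            .msg_iov = &iov,
            .msg_iovlen = 1,
            .msg_control = control,
            .msg_controllen = CMSG_SPACE(fd_num * sizeof(int)),
        };
        struct cmsghdr *cmsg;

        memset(control, 0, sizeof(control));
        cmsg = CMSG_FIRSTHDR(&mh);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(fd_num * sizeof(int));
        memcpy(CMSG_DATA(cmsg), fds, fd_num * sizeof(int));

        return sendmsg(sock, &mh, 0) < 0 ? -1 : 0;
    }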