Message ID | 1592684486-18511-9-git-send-email-kwankhede@nvidia.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | Add migration support for VFIO devices | expand |
On Sun, 21 Jun 2020 01:51:17 +0530 Kirti Wankhede <kwankhede@nvidia.com> wrote: > Added .save_live_pending, .save_live_iterate and .save_live_complete_precopy > functions. These functions handles pre-copy and stop-and-copy phase. > > In _SAVING|_RUNNING device state or pre-copy phase: > - read pending_bytes. If pending_bytes > 0, go through below steps. > - read data_offset - indicates kernel driver to write data to staging > buffer. > - read data_size - amount of data in bytes written by vendor driver in > migration region. > - read data_size bytes of data from data_offset in the migration region. > - Write data packet to file stream as below: > {VFIO_MIG_FLAG_DEV_DATA_STATE, data_size, actual data, > VFIO_MIG_FLAG_END_OF_STATE } > > In _SAVING device state or stop-and-copy phase > a. read config space of device and save to migration file stream. This > doesn't need to be from vendor driver. Any other special config state > from driver can be saved as data in following iteration. > b. read pending_bytes. If pending_bytes > 0, go through below steps. > c. read data_offset - indicates kernel driver to write data to staging > buffer. > d. read data_size - amount of data in bytes written by vendor driver in > migration region. > e. read data_size bytes of data from data_offset in the migration region. > f. Write data packet as below: > {VFIO_MIG_FLAG_DEV_DATA_STATE, data_size, actual data} > g. iterate through steps b to f while (pending_bytes > 0) > h. Write {VFIO_MIG_FLAG_END_OF_STATE} > > When data region is mapped, its user's responsibility to read data from > data_offset of data_size before moving to next steps. 
> > Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com> > Reviewed-by: Neo Jia <cjia@nvidia.com> > --- > hw/vfio/migration.c | 283 ++++++++++++++++++++++++++++++++++++++++++ > hw/vfio/trace-events | 6 + > include/hw/vfio/vfio-common.h | 1 + > 3 files changed, 290 insertions(+) > > diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c > index 133bb5b1b3b2..ef1150c1ff02 100644 > --- a/hw/vfio/migration.c > +++ b/hw/vfio/migration.c > @@ -140,6 +140,168 @@ static int vfio_migration_set_state(VFIODevice *vbasedev, uint32_t mask, > return 0; > } > > +static void *get_data_section_size(VFIORegion *region, uint64_t data_offset, > + uint64_t data_size, uint64_t *size) > +{ > + void *ptr = NULL; > + int i; > + > + if (!region->mmaps) { > + *size = data_size; > + return ptr; > + } > + > + /* check if data_offset in within sparse mmap areas */ > + for (i = 0; i < region->nr_mmaps; i++) { > + VFIOMmap *map = region->mmaps + i; > + > + if ((data_offset >= map->offset) && > + (data_offset < map->offset + map->size)) { > + ptr = map->mmap + data_offset - map->offset; > + > + if (data_offset + data_size <= map->offset + map->size) { > + *size = data_size; > + } else { > + *size = map->offset + map->size - data_offset; > + } Ultimately we take whichever result is smaller, so we could just use: *size = MIN(data_size, map->offset + map->size - data_offset); > + break; > + } > + } > + > + if (!ptr) { > + uint64_t limit = 0; > + > + /* > + * data_offset is not within sparse mmap areas, find size of non-mapped > + * area. Check through all list since region->mmaps list is not sorted. > + */ > + for (i = 0; i < region->nr_mmaps; i++) { > + VFIOMmap *map = region->mmaps + i; > + > + if ((data_offset < map->offset) && > + (!limit || limit > map->offset)) { > + limit = map->offset; > + } We could have done this in an else branch of the previous loop to avoid walking the entries twice. > + } > + > + *size = limit ? 
limit - data_offset : data_size; > + } > + return ptr; > +} > + > +static int vfio_save_buffer(QEMUFile *f, VFIODevice *vbasedev) > +{ > + VFIOMigration *migration = vbasedev->migration; > + VFIORegion *region = &migration->region; > + uint64_t data_offset = 0, data_size = 0, size; > + int ret; > + > + ret = pread(vbasedev->fd, &data_offset, sizeof(data_offset), > + region->fd_offset + offsetof(struct vfio_device_migration_info, > + data_offset)); > + if (ret != sizeof(data_offset)) { > + error_report("%s: Failed to get migration buffer data offset %d", > + vbasedev->name, ret); > + return -EINVAL; > + } > + > + ret = pread(vbasedev->fd, &data_size, sizeof(data_size), > + region->fd_offset + offsetof(struct vfio_device_migration_info, > + data_size)); > + if (ret != sizeof(data_size)) { > + error_report("%s: Failed to get migration buffer data size %d", > + vbasedev->name, ret); > + return -EINVAL; > + } > + > + trace_vfio_save_buffer(vbasedev->name, data_offset, data_size, > + migration->pending_bytes); > + > + qemu_put_be64(f, data_size); > + size = data_size; > + > + while (size) { > + void *buf = NULL; > + bool buffer_mmaped; > + uint64_t sec_size; > + > + buf = get_data_section_size(region, data_offset, size, &sec_size); > + > + buffer_mmaped = (buf != NULL); > + > + if (!buffer_mmaped) { > + buf = g_try_malloc(sec_size); > + if (!buf) { > + error_report("%s: Error allocating buffer ", __func__); > + return -ENOMEM; > + } > + > + ret = pread(vbasedev->fd, buf, sec_size, > + region->fd_offset + data_offset); Is the trade-off to allocate this buffer worth it? I'd be tempted to iterate with a basic data type here to avoid what could potentially be a large memory allocation above. It feels a little more robust, if not perhaps as fast, but I think this will mostly be a fallback or only cover small ranges in normal operation. Of course the data stream needs to be compatible either way we retrieve it. 
> + if (ret != sec_size) { > + error_report("%s: Failed to get migration data %d", > + vbasedev->name, ret); > + g_free(buf); > + return -EINVAL; > + } > + } > + > + qemu_put_buffer(f, buf, sec_size); > + > + if (!buffer_mmaped) { > + g_free(buf); > + } > + size -= sec_size; > + data_offset += sec_size; > + } > + > + ret = qemu_file_get_error(f); > + if (ret) { > + return ret; > + } > + > + return data_size; This function returns int, data_size is uint64_t. Thanks, Alex > +} > + > +static int vfio_update_pending(VFIODevice *vbasedev) > +{ > + VFIOMigration *migration = vbasedev->migration; > + VFIORegion *region = &migration->region; > + uint64_t pending_bytes = 0; > + int ret; > + > + ret = pread(vbasedev->fd, &pending_bytes, sizeof(pending_bytes), > + region->fd_offset + offsetof(struct vfio_device_migration_info, > + pending_bytes)); > + if ((ret < 0) || (ret != sizeof(pending_bytes))) { > + error_report("%s: Failed to get pending bytes %d", > + vbasedev->name, ret); > + migration->pending_bytes = 0; > + return (ret < 0) ? 
ret : -EINVAL; > + } > + > + migration->pending_bytes = pending_bytes; > + trace_vfio_update_pending(vbasedev->name, pending_bytes); > + return 0; > +} > + > +static int vfio_save_device_config_state(QEMUFile *f, void *opaque) > +{ > + VFIODevice *vbasedev = opaque; > + > + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_CONFIG_STATE); > + > + if (vbasedev->ops && vbasedev->ops->vfio_save_config) { > + vbasedev->ops->vfio_save_config(vbasedev, f); > + } > + > + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); > + > + trace_vfio_save_device_config_state(vbasedev->name); > + > + return qemu_file_get_error(f); > +} > + > /* ---------------------------------------------------------------------- */ > > static int vfio_save_setup(QEMUFile *f, void *opaque) > @@ -192,9 +354,130 @@ static void vfio_save_cleanup(void *opaque) > trace_vfio_save_cleanup(vbasedev->name); > } > > +static void vfio_save_pending(QEMUFile *f, void *opaque, > + uint64_t threshold_size, > + uint64_t *res_precopy_only, > + uint64_t *res_compatible, > + uint64_t *res_postcopy_only) > +{ > + VFIODevice *vbasedev = opaque; > + VFIOMigration *migration = vbasedev->migration; > + int ret; > + > + ret = vfio_update_pending(vbasedev); > + if (ret) { > + return; > + } > + > + *res_precopy_only += migration->pending_bytes; > + > + trace_vfio_save_pending(vbasedev->name, *res_precopy_only, > + *res_postcopy_only, *res_compatible); > +} > + > +static int vfio_save_iterate(QEMUFile *f, void *opaque) > +{ > + VFIODevice *vbasedev = opaque; > + VFIOMigration *migration = vbasedev->migration; > + int ret, data_size; > + > + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE); > + > + if (migration->pending_bytes == 0) { > + ret = vfio_update_pending(vbasedev); > + if (ret) { > + return ret; > + } > + > + if (migration->pending_bytes == 0) { > + /* indicates data finished, goto complete phase */ > + return 1; > + } > + } > + > + data_size = vfio_save_buffer(f, vbasedev); > + > + if (data_size < 0) { > + error_report("%s: 
vfio_save_buffer failed %s", vbasedev->name, > + strerror(errno)); > + return data_size; > + } > + > + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); > + > + ret = qemu_file_get_error(f); > + if (ret) { > + return ret; > + } > + > + trace_vfio_save_iterate(vbasedev->name, data_size); > + > + return 0; > +} > + > +static int vfio_save_complete_precopy(QEMUFile *f, void *opaque) > +{ > + VFIODevice *vbasedev = opaque; > + VFIOMigration *migration = vbasedev->migration; > + int ret; > + > + ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_RUNNING, > + VFIO_DEVICE_STATE_SAVING); > + if (ret) { > + error_report("%s: Failed to set state STOP and SAVING", > + vbasedev->name); > + return ret; > + } > + > + ret = vfio_save_device_config_state(f, opaque); > + if (ret) { > + return ret; > + } > + > + ret = vfio_update_pending(vbasedev); > + if (ret) { > + return ret; > + } > + > + while (migration->pending_bytes > 0) { > + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE); > + ret = vfio_save_buffer(f, vbasedev); > + if (ret < 0) { > + error_report("%s: Failed to save buffer", vbasedev->name); > + return ret; > + } else if (ret == 0) { > + break; > + } > + > + ret = vfio_update_pending(vbasedev); > + if (ret) { > + return ret; > + } > + } > + > + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); > + > + ret = qemu_file_get_error(f); > + if (ret) { > + return ret; > + } > + > + ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_SAVING, 0); > + if (ret) { > + error_report("%s: Failed to set state STOPPED", vbasedev->name); > + return ret; > + } > + > + trace_vfio_save_complete_precopy(vbasedev->name); > + return ret; > +} > + > static SaveVMHandlers savevm_vfio_handlers = { > .save_setup = vfio_save_setup, > .save_cleanup = vfio_save_cleanup, > + .save_live_pending = vfio_save_pending, > + .save_live_iterate = vfio_save_iterate, > + .save_live_complete_precopy = vfio_save_complete_precopy, > }; > > /* 
---------------------------------------------------------------------- */ > diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events > index 86c18def016e..9a1c5e17d97f 100644 > --- a/hw/vfio/trace-events > +++ b/hw/vfio/trace-events > @@ -151,3 +151,9 @@ vfio_vmstate_change(const char *name, int running, const char *reason, uint32_t > vfio_migration_state_notifier(const char *name, const char *state) " (%s) state %s" > vfio_save_setup(const char *name) " (%s)" > vfio_save_cleanup(const char *name) " (%s)" > +vfio_save_buffer(const char *name, uint64_t data_offset, uint64_t data_size, uint64_t pending) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64" pending 0x%"PRIx64 > +vfio_update_pending(const char *name, uint64_t pending) " (%s) pending 0x%"PRIx64 > +vfio_save_device_config_state(const char *name) " (%s)" > +vfio_save_pending(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t compatible) " (%s) precopy 0x%"PRIx64" postcopy 0x%"PRIx64" compatible 0x%"PRIx64 > +vfio_save_iterate(const char *name, int data_size) " (%s) data_size %d" > +vfio_save_complete_precopy(const char *name) " (%s)" > diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h > index 28f55f66d019..c78033e4149d 100644 > --- a/include/hw/vfio/vfio-common.h > +++ b/include/hw/vfio/vfio-common.h > @@ -60,6 +60,7 @@ typedef struct VFIORegion { > > typedef struct VFIOMigration { > VFIORegion region; > + uint64_t pending_bytes; > } VFIOMigration; > > typedef struct VFIOAddressSpace {
On 6/23/2020 4:20 AM, Alex Williamson wrote: > On Sun, 21 Jun 2020 01:51:17 +0530 > Kirti Wankhede <kwankhede@nvidia.com> wrote: > >> Added .save_live_pending, .save_live_iterate and .save_live_complete_precopy >> functions. These functions handles pre-copy and stop-and-copy phase. >> >> In _SAVING|_RUNNING device state or pre-copy phase: >> - read pending_bytes. If pending_bytes > 0, go through below steps. >> - read data_offset - indicates kernel driver to write data to staging >> buffer. >> - read data_size - amount of data in bytes written by vendor driver in >> migration region. >> - read data_size bytes of data from data_offset in the migration region. >> - Write data packet to file stream as below: >> {VFIO_MIG_FLAG_DEV_DATA_STATE, data_size, actual data, >> VFIO_MIG_FLAG_END_OF_STATE } >> >> In _SAVING device state or stop-and-copy phase >> a. read config space of device and save to migration file stream. This >> doesn't need to be from vendor driver. Any other special config state >> from driver can be saved as data in following iteration. >> b. read pending_bytes. If pending_bytes > 0, go through below steps. >> c. read data_offset - indicates kernel driver to write data to staging >> buffer. >> d. read data_size - amount of data in bytes written by vendor driver in >> migration region. >> e. read data_size bytes of data from data_offset in the migration region. >> f. Write data packet as below: >> {VFIO_MIG_FLAG_DEV_DATA_STATE, data_size, actual data} >> g. iterate through steps b to f while (pending_bytes > 0) >> h. Write {VFIO_MIG_FLAG_END_OF_STATE} >> >> When data region is mapped, its user's responsibility to read data from >> data_offset of data_size before moving to next steps. 
>> >> Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com> >> Reviewed-by: Neo Jia <cjia@nvidia.com> >> --- >> hw/vfio/migration.c | 283 ++++++++++++++++++++++++++++++++++++++++++ >> hw/vfio/trace-events | 6 + >> include/hw/vfio/vfio-common.h | 1 + >> 3 files changed, 290 insertions(+) >> >> diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c >> index 133bb5b1b3b2..ef1150c1ff02 100644 >> --- a/hw/vfio/migration.c >> +++ b/hw/vfio/migration.c >> @@ -140,6 +140,168 @@ static int vfio_migration_set_state(VFIODevice *vbasedev, uint32_t mask, >> return 0; >> } >> >> +static void *get_data_section_size(VFIORegion *region, uint64_t data_offset, >> + uint64_t data_size, uint64_t *size) >> +{ >> + void *ptr = NULL; >> + int i; >> + >> + if (!region->mmaps) { >> + *size = data_size; >> + return ptr; >> + } >> + >> + /* check if data_offset in within sparse mmap areas */ >> + for (i = 0; i < region->nr_mmaps; i++) { >> + VFIOMmap *map = region->mmaps + i; >> + >> + if ((data_offset >= map->offset) && >> + (data_offset < map->offset + map->size)) { >> + ptr = map->mmap + data_offset - map->offset; >> + >> + if (data_offset + data_size <= map->offset + map->size) { >> + *size = data_size; >> + } else { >> + *size = map->offset + map->size - data_offset; >> + } > > Ultimately we take whichever result is smaller, so we could just use: > > *size = MIN(data_size, map->offset + map->size - data_offset); > >> + break; >> + } >> + } >> + >> + if (!ptr) { >> + uint64_t limit = 0; >> + >> + /* >> + * data_offset is not within sparse mmap areas, find size of non-mapped >> + * area. Check through all list since region->mmaps list is not sorted. >> + */ >> + for (i = 0; i < region->nr_mmaps; i++) { >> + VFIOMmap *map = region->mmaps + i; >> + >> + if ((data_offset < map->offset) && >> + (!limit || limit > map->offset)) { >> + limit = map->offset; >> + } > > We could have done this in an else branch of the previous loop to avoid > walking the entries twice. > Ok. 
updating with above 2 changes. >> + } >> + >> + *size = limit ? limit - data_offset : data_size; >> + } >> + return ptr; >> +} >> + >> +static int vfio_save_buffer(QEMUFile *f, VFIODevice *vbasedev) >> +{ >> + VFIOMigration *migration = vbasedev->migration; >> + VFIORegion *region = &migration->region; >> + uint64_t data_offset = 0, data_size = 0, size; >> + int ret; >> + >> + ret = pread(vbasedev->fd, &data_offset, sizeof(data_offset), >> + region->fd_offset + offsetof(struct vfio_device_migration_info, >> + data_offset)); >> + if (ret != sizeof(data_offset)) { >> + error_report("%s: Failed to get migration buffer data offset %d", >> + vbasedev->name, ret); >> + return -EINVAL; >> + } >> + >> + ret = pread(vbasedev->fd, &data_size, sizeof(data_size), >> + region->fd_offset + offsetof(struct vfio_device_migration_info, >> + data_size)); >> + if (ret != sizeof(data_size)) { >> + error_report("%s: Failed to get migration buffer data size %d", >> + vbasedev->name, ret); >> + return -EINVAL; >> + } >> + >> + trace_vfio_save_buffer(vbasedev->name, data_offset, data_size, >> + migration->pending_bytes); >> + >> + qemu_put_be64(f, data_size); >> + size = data_size; >> + >> + while (size) { >> + void *buf = NULL; >> + bool buffer_mmaped; >> + uint64_t sec_size; >> + >> + buf = get_data_section_size(region, data_offset, size, &sec_size); >> + >> + buffer_mmaped = (buf != NULL); >> + >> + if (!buffer_mmaped) { >> + buf = g_try_malloc(sec_size); >> + if (!buf) { >> + error_report("%s: Error allocating buffer ", __func__); >> + return -ENOMEM; >> + } >> + >> + ret = pread(vbasedev->fd, buf, sec_size, >> + region->fd_offset + data_offset); > > Is the trade-off to allocate this buffer worth it? I'd be tempted to > iterate with a basic data type here to avoid what could potentially be > a large memory allocation above. It feels a little more robust, if not > perhaps as fast, but I this will mostly be a fallback or only cover > small ranges in normal operation. 
Of course the data stream needs to > be compatible either way we retrieve it. > What should be basic data type here, u8, u16, u32, u64? We don't know at what granularity vendor driver is writing, then I think we have to go with smallest u8, right? >> + if (ret != sec_size) { >> + error_report("%s: Failed to get migration data %d", >> + vbasedev->name, ret); >> + g_free(buf); >> + return -EINVAL; >> + } >> + } >> + >> + qemu_put_buffer(f, buf, sec_size); >> + >> + if (!buffer_mmaped) { >> + g_free(buf); >> + } >> + size -= sec_size; >> + data_offset += sec_size; >> + } >> + >> + ret = qemu_file_get_error(f); >> + if (ret) { >> + return ret; >> + } >> + >> + return data_size; > > This function returns int, data_size is uint64_t. Thanks, > > Yes, returns for this function: > < 0 => error > ==0 => no more data to save > data_size => amount of data saved in this function. > > Thanks, > Kirti
On Wed, 24 Jun 2020 02:04:24 +0530 Kirti Wankhede <kwankhede@nvidia.com> wrote: > On 6/23/2020 4:20 AM, Alex Williamson wrote: > > On Sun, 21 Jun 2020 01:51:17 +0530 > > Kirti Wankhede <kwankhede@nvidia.com> wrote: > > > >> Added .save_live_pending, .save_live_iterate and .save_live_complete_precopy > >> functions. These functions handles pre-copy and stop-and-copy phase. > >> > >> In _SAVING|_RUNNING device state or pre-copy phase: > >> - read pending_bytes. If pending_bytes > 0, go through below steps. > >> - read data_offset - indicates kernel driver to write data to staging > >> buffer. > >> - read data_size - amount of data in bytes written by vendor driver in > >> migration region. > >> - read data_size bytes of data from data_offset in the migration region. > >> - Write data packet to file stream as below: > >> {VFIO_MIG_FLAG_DEV_DATA_STATE, data_size, actual data, > >> VFIO_MIG_FLAG_END_OF_STATE } > >> > >> In _SAVING device state or stop-and-copy phase > >> a. read config space of device and save to migration file stream. This > >> doesn't need to be from vendor driver. Any other special config state > >> from driver can be saved as data in following iteration. > >> b. read pending_bytes. If pending_bytes > 0, go through below steps. > >> c. read data_offset - indicates kernel driver to write data to staging > >> buffer. > >> d. read data_size - amount of data in bytes written by vendor driver in > >> migration region. > >> e. read data_size bytes of data from data_offset in the migration region. > >> f. Write data packet as below: > >> {VFIO_MIG_FLAG_DEV_DATA_STATE, data_size, actual data} > >> g. iterate through steps b to f while (pending_bytes > 0) > >> h. Write {VFIO_MIG_FLAG_END_OF_STATE} > >> > >> When data region is mapped, its user's responsibility to read data from > >> data_offset of data_size before moving to next steps. 
> >> > >> Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com> > >> Reviewed-by: Neo Jia <cjia@nvidia.com> > >> --- > >> hw/vfio/migration.c | 283 ++++++++++++++++++++++++++++++++++++++++++ > >> hw/vfio/trace-events | 6 + > >> include/hw/vfio/vfio-common.h | 1 + > >> 3 files changed, 290 insertions(+) > >> > >> diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c > >> index 133bb5b1b3b2..ef1150c1ff02 100644 > >> --- a/hw/vfio/migration.c > >> +++ b/hw/vfio/migration.c > >> @@ -140,6 +140,168 @@ static int vfio_migration_set_state(VFIODevice *vbasedev, uint32_t mask, > >> return 0; > >> } > >> > >> +static void *get_data_section_size(VFIORegion *region, uint64_t data_offset, > >> + uint64_t data_size, uint64_t *size) > >> +{ > >> + void *ptr = NULL; > >> + int i; > >> + > >> + if (!region->mmaps) { > >> + *size = data_size; > >> + return ptr; > >> + } > >> + > >> + /* check if data_offset in within sparse mmap areas */ > >> + for (i = 0; i < region->nr_mmaps; i++) { > >> + VFIOMmap *map = region->mmaps + i; > >> + > >> + if ((data_offset >= map->offset) && > >> + (data_offset < map->offset + map->size)) { > >> + ptr = map->mmap + data_offset - map->offset; > >> + > >> + if (data_offset + data_size <= map->offset + map->size) { > >> + *size = data_size; > >> + } else { > >> + *size = map->offset + map->size - data_offset; > >> + } > > > > Ultimately we take whichever result is smaller, so we could just use: > > > > *size = MIN(data_size, map->offset + map->size - data_offset); > > > >> + break; > >> + } > >> + } > >> + > >> + if (!ptr) { > >> + uint64_t limit = 0; > >> + > >> + /* > >> + * data_offset is not within sparse mmap areas, find size of non-mapped > >> + * area. Check through all list since region->mmaps list is not sorted. 
> >> + */ > >> + for (i = 0; i < region->nr_mmaps; i++) { > >> + VFIOMmap *map = region->mmaps + i; > >> + > >> + if ((data_offset < map->offset) && > >> + (!limit || limit > map->offset)) { > >> + limit = map->offset; > >> + } > > > > We could have done this in an else branch of the previous loop to avoid > > walking the entries twice. > > > > Ok. updating with above 2 changes. > > >> + } > >> + > >> + *size = limit ? limit - data_offset : data_size; > >> + } > >> + return ptr; > >> +} > >> + > >> +static int vfio_save_buffer(QEMUFile *f, VFIODevice *vbasedev) > >> +{ > >> + VFIOMigration *migration = vbasedev->migration; > >> + VFIORegion *region = &migration->region; > >> + uint64_t data_offset = 0, data_size = 0, size; > >> + int ret; > >> + > >> + ret = pread(vbasedev->fd, &data_offset, sizeof(data_offset), > >> + region->fd_offset + offsetof(struct vfio_device_migration_info, > >> + data_offset)); > >> + if (ret != sizeof(data_offset)) { > >> + error_report("%s: Failed to get migration buffer data offset %d", > >> + vbasedev->name, ret); > >> + return -EINVAL; > >> + } > >> + > >> + ret = pread(vbasedev->fd, &data_size, sizeof(data_size), > >> + region->fd_offset + offsetof(struct vfio_device_migration_info, > >> + data_size)); > >> + if (ret != sizeof(data_size)) { > >> + error_report("%s: Failed to get migration buffer data size %d", > >> + vbasedev->name, ret); > >> + return -EINVAL; > >> + } > >> + > >> + trace_vfio_save_buffer(vbasedev->name, data_offset, data_size, > >> + migration->pending_bytes); > >> + > >> + qemu_put_be64(f, data_size); > >> + size = data_size; > >> + > >> + while (size) { > >> + void *buf = NULL; > >> + bool buffer_mmaped; > >> + uint64_t sec_size; > >> + > >> + buf = get_data_section_size(region, data_offset, size, &sec_size); > >> + > >> + buffer_mmaped = (buf != NULL); > >> + > >> + if (!buffer_mmaped) { > >> + buf = g_try_malloc(sec_size); > >> + if (!buf) { > >> + error_report("%s: Error allocating buffer ", __func__); > >> + 
return -ENOMEM; > >> + } > >> + > >> + ret = pread(vbasedev->fd, buf, sec_size, > >> + region->fd_offset + data_offset); > > > > Is the trade-off to allocate this buffer worth it? I'd be tempted to > > iterate with a basic data type here to avoid what could potentially be > > a large memory allocation above. It feels a little more robust, if not > > perhaps as fast, but I this will mostly be a fallback or only cover > > small ranges in normal operation. Of course the data stream needs to > > be compatible either way we retrieve it. > > > > What should be basic data type here, u8, u16, u32, u64? We don't know at > what granularity vendor driver is writing, then I thnk we have to go > with smallest u8, right? That'd be a little on the ridiculous side. We could make a helper like in vfio_pci_rdwr that reads at the largest aligned size up to u64. > >> + if (ret != sec_size) { > >> + error_report("%s: Failed to get migration data %d", > >> + vbasedev->name, ret); > >> + g_free(buf); > >> + return -EINVAL; > >> + } > >> + } > >> + > >> + qemu_put_buffer(f, buf, sec_size); > >> + > >> + if (!buffer_mmaped) { > >> + g_free(buf); > >> + } > >> + size -= sec_size; > >> + data_offset += sec_size; > >> + } > >> + > >> + ret = qemu_file_get_error(f); > >> + if (ret) { > >> + return ret; > >> + } > >> + > >> + return data_size; > > > > This function returns int, data_size is uint64_t. Thanks, > > > > Yes, returns for this function: > < 0 => error > ==0 => no more data to save > data_size => amount of data saved in this function. So when data_size exceeds MAX_UINT, the return value goes negative... Thanks, Alex
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c index 133bb5b1b3b2..ef1150c1ff02 100644 --- a/hw/vfio/migration.c +++ b/hw/vfio/migration.c @@ -140,6 +140,168 @@ static int vfio_migration_set_state(VFIODevice *vbasedev, uint32_t mask, return 0; } +static void *get_data_section_size(VFIORegion *region, uint64_t data_offset, + uint64_t data_size, uint64_t *size) +{ + void *ptr = NULL; + int i; + + if (!region->mmaps) { + *size = data_size; + return ptr; + } + + /* check if data_offset in within sparse mmap areas */ + for (i = 0; i < region->nr_mmaps; i++) { + VFIOMmap *map = region->mmaps + i; + + if ((data_offset >= map->offset) && + (data_offset < map->offset + map->size)) { + ptr = map->mmap + data_offset - map->offset; + + if (data_offset + data_size <= map->offset + map->size) { + *size = data_size; + } else { + *size = map->offset + map->size - data_offset; + } + break; + } + } + + if (!ptr) { + uint64_t limit = 0; + + /* + * data_offset is not within sparse mmap areas, find size of non-mapped + * area. Check through all list since region->mmaps list is not sorted. + */ + for (i = 0; i < region->nr_mmaps; i++) { + VFIOMmap *map = region->mmaps + i; + + if ((data_offset < map->offset) && + (!limit || limit > map->offset)) { + limit = map->offset; + } + } + + *size = limit ? 
limit - data_offset : data_size; + } + return ptr; +} + +static int vfio_save_buffer(QEMUFile *f, VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + VFIORegion *region = &migration->region; + uint64_t data_offset = 0, data_size = 0, size; + int ret; + + ret = pread(vbasedev->fd, &data_offset, sizeof(data_offset), + region->fd_offset + offsetof(struct vfio_device_migration_info, + data_offset)); + if (ret != sizeof(data_offset)) { + error_report("%s: Failed to get migration buffer data offset %d", + vbasedev->name, ret); + return -EINVAL; + } + + ret = pread(vbasedev->fd, &data_size, sizeof(data_size), + region->fd_offset + offsetof(struct vfio_device_migration_info, + data_size)); + if (ret != sizeof(data_size)) { + error_report("%s: Failed to get migration buffer data size %d", + vbasedev->name, ret); + return -EINVAL; + } + + trace_vfio_save_buffer(vbasedev->name, data_offset, data_size, + migration->pending_bytes); + + qemu_put_be64(f, data_size); + size = data_size; + + while (size) { + void *buf = NULL; + bool buffer_mmaped; + uint64_t sec_size; + + buf = get_data_section_size(region, data_offset, size, &sec_size); + + buffer_mmaped = (buf != NULL); + + if (!buffer_mmaped) { + buf = g_try_malloc(sec_size); + if (!buf) { + error_report("%s: Error allocating buffer ", __func__); + return -ENOMEM; + } + + ret = pread(vbasedev->fd, buf, sec_size, + region->fd_offset + data_offset); + if (ret != sec_size) { + error_report("%s: Failed to get migration data %d", + vbasedev->name, ret); + g_free(buf); + return -EINVAL; + } + } + + qemu_put_buffer(f, buf, sec_size); + + if (!buffer_mmaped) { + g_free(buf); + } + size -= sec_size; + data_offset += sec_size; + } + + ret = qemu_file_get_error(f); + if (ret) { + return ret; + } + + return data_size; +} + +static int vfio_update_pending(VFIODevice *vbasedev) +{ + VFIOMigration *migration = vbasedev->migration; + VFIORegion *region = &migration->region; + uint64_t pending_bytes = 0; + int ret; + + 
ret = pread(vbasedev->fd, &pending_bytes, sizeof(pending_bytes), + region->fd_offset + offsetof(struct vfio_device_migration_info, + pending_bytes)); + if ((ret < 0) || (ret != sizeof(pending_bytes))) { + error_report("%s: Failed to get pending bytes %d", + vbasedev->name, ret); + migration->pending_bytes = 0; + return (ret < 0) ? ret : -EINVAL; + } + + migration->pending_bytes = pending_bytes; + trace_vfio_update_pending(vbasedev->name, pending_bytes); + return 0; +} + +static int vfio_save_device_config_state(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_CONFIG_STATE); + + if (vbasedev->ops && vbasedev->ops->vfio_save_config) { + vbasedev->ops->vfio_save_config(vbasedev, f); + } + + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); + + trace_vfio_save_device_config_state(vbasedev->name); + + return qemu_file_get_error(f); +} + /* ---------------------------------------------------------------------- */ static int vfio_save_setup(QEMUFile *f, void *opaque) @@ -192,9 +354,130 @@ static void vfio_save_cleanup(void *opaque) trace_vfio_save_cleanup(vbasedev->name); } +static void vfio_save_pending(QEMUFile *f, void *opaque, + uint64_t threshold_size, + uint64_t *res_precopy_only, + uint64_t *res_compatible, + uint64_t *res_postcopy_only) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + int ret; + + ret = vfio_update_pending(vbasedev); + if (ret) { + return; + } + + *res_precopy_only += migration->pending_bytes; + + trace_vfio_save_pending(vbasedev->name, *res_precopy_only, + *res_postcopy_only, *res_compatible); +} + +static int vfio_save_iterate(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + int ret, data_size; + + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE); + + if (migration->pending_bytes == 0) { + ret = vfio_update_pending(vbasedev); + if (ret) { + return ret; + } + + if (migration->pending_bytes == 0) { 
+ /* indicates data finished, goto complete phase */ + return 1; + } + } + + data_size = vfio_save_buffer(f, vbasedev); + + if (data_size < 0) { + error_report("%s: vfio_save_buffer failed %s", vbasedev->name, + strerror(errno)); + return data_size; + } + + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); + + ret = qemu_file_get_error(f); + if (ret) { + return ret; + } + + trace_vfio_save_iterate(vbasedev->name, data_size); + + return 0; +} + +static int vfio_save_complete_precopy(QEMUFile *f, void *opaque) +{ + VFIODevice *vbasedev = opaque; + VFIOMigration *migration = vbasedev->migration; + int ret; + + ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_RUNNING, + VFIO_DEVICE_STATE_SAVING); + if (ret) { + error_report("%s: Failed to set state STOP and SAVING", + vbasedev->name); + return ret; + } + + ret = vfio_save_device_config_state(f, opaque); + if (ret) { + return ret; + } + + ret = vfio_update_pending(vbasedev); + if (ret) { + return ret; + } + + while (migration->pending_bytes > 0) { + qemu_put_be64(f, VFIO_MIG_FLAG_DEV_DATA_STATE); + ret = vfio_save_buffer(f, vbasedev); + if (ret < 0) { + error_report("%s: Failed to save buffer", vbasedev->name); + return ret; + } else if (ret == 0) { + break; + } + + ret = vfio_update_pending(vbasedev); + if (ret) { + return ret; + } + } + + qemu_put_be64(f, VFIO_MIG_FLAG_END_OF_STATE); + + ret = qemu_file_get_error(f); + if (ret) { + return ret; + } + + ret = vfio_migration_set_state(vbasedev, ~VFIO_DEVICE_STATE_SAVING, 0); + if (ret) { + error_report("%s: Failed to set state STOPPED", vbasedev->name); + return ret; + } + + trace_vfio_save_complete_precopy(vbasedev->name); + return ret; +} + static SaveVMHandlers savevm_vfio_handlers = { .save_setup = vfio_save_setup, .save_cleanup = vfio_save_cleanup, + .save_live_pending = vfio_save_pending, + .save_live_iterate = vfio_save_iterate, + .save_live_complete_precopy = vfio_save_complete_precopy, }; /* 
---------------------------------------------------------------------- */ diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index 86c18def016e..9a1c5e17d97f 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -151,3 +151,9 @@ vfio_vmstate_change(const char *name, int running, const char *reason, uint32_t vfio_migration_state_notifier(const char *name, const char *state) " (%s) state %s" vfio_save_setup(const char *name) " (%s)" vfio_save_cleanup(const char *name) " (%s)" +vfio_save_buffer(const char *name, uint64_t data_offset, uint64_t data_size, uint64_t pending) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64" pending 0x%"PRIx64 +vfio_update_pending(const char *name, uint64_t pending) " (%s) pending 0x%"PRIx64 +vfio_save_device_config_state(const char *name) " (%s)" +vfio_save_pending(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t compatible) " (%s) precopy 0x%"PRIx64" postcopy 0x%"PRIx64" compatible 0x%"PRIx64 +vfio_save_iterate(const char *name, int data_size) " (%s) data_size %d" +vfio_save_complete_precopy(const char *name) " (%s)" diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h index 28f55f66d019..c78033e4149d 100644 --- a/include/hw/vfio/vfio-common.h +++ b/include/hw/vfio/vfio-common.h @@ -60,6 +60,7 @@ typedef struct VFIORegion { typedef struct VFIOMigration { VFIORegion region; + uint64_t pending_bytes; } VFIOMigration; typedef struct VFIOAddressSpace {