| Message ID | 1603365127-14202-15-git-send-email-kwankhede@nvidia.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | Add migration support for VFIO devices |
On Thu, 22 Oct 2020 16:42:04 +0530
Kirti Wankhede <kwankhede@nvidia.com> wrote:

> When vIOMMU is enabled, register MAP notifier from log_sync when all
> devices in container are in stop and copy phase of migration. Call replay
> and get dirty pages from notifier callback.
>
> Suggested-by: Alex Williamson <alex.williamson@redhat.com>
> Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
> ---
>  hw/vfio/common.c              | 95 ++++++++++++++++++++++++++++++++++++++++---
>  hw/vfio/trace-events          |  1 +
>  include/hw/vfio/vfio-common.h |  1 +
>  3 files changed, 91 insertions(+), 6 deletions(-)
>
> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
> index 2634387df948..98c2b1f9b190 100644
> --- a/hw/vfio/common.c
> +++ b/hw/vfio/common.c
> @@ -442,8 +442,8 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
>  }
>
>  /* Called with rcu_read_lock held. */
> -static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
> -                           bool *read_only)
> +static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
> +                               ram_addr_t *ram_addr, bool *read_only)
>  {
>      MemoryRegion *mr;
>      hwaddr xlat;
> @@ -474,8 +474,17 @@ static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
>          return false;
>      }
>
> -    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
> -    *read_only = !writable || mr->readonly;
> +    if (vaddr) {
> +        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
> +    }
> +
> +    if (ram_addr) {
> +        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
> +    }
> +
> +    if (read_only) {
> +        *read_only = !writable || mr->readonly;
> +    }
>
>      return true;
>  }
> @@ -485,7 +494,6 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>      VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
>      VFIOContainer *container = giommu->container;
>      hwaddr iova = iotlb->iova + giommu->iommu_offset;
> -    bool read_only;
>      void *vaddr;
>      int ret;
>
> @@ -501,7 +509,9 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>      rcu_read_lock();
>
>      if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
> -        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
> +        bool read_only;
> +
> +        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
>              goto out;
>          }
>          /*
> @@ -899,11 +909,84 @@ err_out:
>      return ret;
>  }
>
> +static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
> +{
> +    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, dirty_notify);
> +    VFIOContainer *container = giommu->container;
> +    hwaddr iova = iotlb->iova + giommu->iommu_offset;
> +    ram_addr_t translated_addr;
> +
> +    trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
> +
> +    if (iotlb->target_as != &address_space_memory) {
> +        error_report("Wrong target AS \"%s\", only system memory is allowed",
> +                     iotlb->target_as->name ? iotlb->target_as->name : "none");
> +        return;
> +    }
> +
> +    rcu_read_lock();
> +
> +    if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
> +        int ret;
> +
> +        ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
> +                                    translated_addr);
> +        if (ret) {
> +            error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
> +                         "0x%"HWADDR_PRIx") = %d (%m)",
> +                         container, iova,
> +                         iotlb->addr_mask + 1, ret);
> +        }
> +    }
> +
> +    rcu_read_unlock();
> +}
> +
>  static int vfio_sync_dirty_bitmap(VFIOContainer *container,
>                                    MemoryRegionSection *section)
>  {
>      ram_addr_t ram_addr;
>
> +    if (memory_region_is_iommu(section->mr)) {
> +        VFIOGuestIOMMU *giommu;
> +        int ret = 0;
> +
> +        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
> +            if (MEMORY_REGION(giommu->iommu) == section->mr &&
> +                giommu->n.start == section->offset_within_region) {
> +                Int128 llend;
> +                Error *err = NULL;
> +                int idx = memory_region_iommu_attrs_to_index(giommu->iommu,
> +                                                      MEMTXATTRS_UNSPECIFIED);
> +
> +                llend = int128_add(int128_make64(section->offset_within_region),
> +                                   section->size);
> +                llend = int128_sub(llend, int128_one());
> +
> +                iommu_notifier_init(&giommu->dirty_notify,
> +                                    vfio_iommu_map_dirty_notify,
> +                                    IOMMU_NOTIFIER_MAP,
> +                                    section->offset_within_region,
> +                                    int128_get64(llend),
> +                                    idx);
> +                ret = memory_region_register_iommu_notifier(section->mr,
> +                                                   &giommu->dirty_notify, &err);
> +                if (ret) {
> +                    error_report_err(err);
> +                    break;
> +                }
> +
> +                memory_region_iommu_replay(giommu->iommu,
> +                                           &giommu->dirty_notify);
> +
> +                memory_region_unregister_iommu_notifier(section->mr,
> +                                                        &giommu->dirty_notify);

Is it necessary to do the register/unregister? It seemed to me that
perhaps we could do a replay independent of those.

I'd also be tempted to move dirty_notify to a temporary object rather
than store it on the giommu for such a brief usage, i.e. define:

struct giommu_dirty_notifier {
    IOMMUNotifier n;
    VFIOGuestIOMMU *giommu;
};

struct giommu_dirty_notifier gdn = { .giommu = giommu };

iommu_notifier_init(&gdn.n, ...);

memory_region_iommu_replay(giommu->iommu, &gdn.n);
...

struct giommu_dirty_notifier *gdn = container_of(n, struct giommu_dirty_notifier, n);
VFIOGuestIOMMU *giommu = gdn->giommu;

It's nice that we remove the extra bloat of the list/tree entirely with
this approach.

Thanks,
Alex

> +                break;
> +            }
> +        }
> +        return ret;
> +    }
> +
>      ram_addr = memory_region_get_ram_addr(section->mr) +
>                 section->offset_within_region;
>
> diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
> index c3b35aa2cce8..d9cb8998a228 100644
> --- a/hw/vfio/trace-events
> +++ b/hw/vfio/trace-events
> @@ -163,3 +163,4 @@ vfio_load_device_config_state(const char *name) " (%s)"
>  vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64
>  vfio_load_state_device_data(const char *name, uint64_t data_offset, uint64_t data_size) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64
>  vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64
> +vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64
> diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
> index b1c1b18fd228..92872fae59ee 100644
> --- a/include/hw/vfio/vfio-common.h
> +++ b/include/hw/vfio/vfio-common.h
> @@ -99,6 +99,7 @@ typedef struct VFIOGuestIOMMU {
>      IOMMUMemoryRegion *iommu;
>      hwaddr iommu_offset;
>      IOMMUNotifier n;
> +    IOMMUNotifier dirty_notify;
>      QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
>  } VFIOGuestIOMMU;
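
To make the container_of() recovery in the suggestion above concrete, here is a minimal, standalone sketch of the temporary-notifier pattern. The IOMMUNotifier and VFIOGuestIOMMU definitions are simplified stand-ins rather than QEMU's real types, and dirty_notify_cb() is a hypothetical callback name; only the wrapper struct and the container_of() usage mirror the suggestion.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the QEMU types involved; the real
 * IOMMUNotifier callback also receives an IOMMUTLBEntry. */
typedef struct IOMMUNotifier {
    void (*notify)(struct IOMMUNotifier *n);
} IOMMUNotifier;

typedef struct VFIOGuestIOMMU {
    const char *name;
} VFIOGuestIOMMU;

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Short-lived wrapper pairing the notifier with its giommu, so
 * VFIOGuestIOMMU itself needs no permanent dirty_notify field. */
typedef struct giommu_dirty_notifier {
    IOMMUNotifier n;
    VFIOGuestIOMMU *giommu;
} giommu_dirty_notifier;

static void dirty_notify_cb(IOMMUNotifier *n)
{
    /* Recover the wrapper, and through it the giommu. */
    giommu_dirty_notifier *gdn = container_of(n, giommu_dirty_notifier, n);
    printf("dirty notify for %s\n", gdn->giommu->name);
}

int main(void)
{
    VFIOGuestIOMMU giommu = { .name = "giommu0" };
    /* Stack-allocated, so it lives only for the replay. */
    giommu_dirty_notifier gdn = { .giommu = &giommu };

    gdn.n.notify = dirty_notify_cb;
    /* In the real code, memory_region_iommu_replay() would invoke the
     * notifier here for each valid mapping. */
    gdn.n.notify(&gdn.n);
    return 0;
}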
On 10/23/2020 2:07 AM, Alex Williamson wrote:
> On Thu, 22 Oct 2020 16:42:04 +0530
> Kirti Wankhede <kwankhede@nvidia.com> wrote:
>
>> When vIOMMU is enabled, register MAP notifier from log_sync when all
>> devices in container are in stop and copy phase of migration. Call replay
>> and get dirty pages from notifier callback.
>>
>> Suggested-by: Alex Williamson <alex.williamson@redhat.com>
>> Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
>> ---
>>  hw/vfio/common.c              | 95 ++++++++++++++++++++++++++++++++++++++++---
>>  hw/vfio/trace-events          |  1 +
>>  include/hw/vfio/vfio-common.h |  1 +
>>  3 files changed, 91 insertions(+), 6 deletions(-)
>>
>> diff --git a/hw/vfio/common.c b/hw/vfio/common.c
>> index 2634387df948..98c2b1f9b190 100644
>> --- a/hw/vfio/common.c
>> +++ b/hw/vfio/common.c
>> @@ -442,8 +442,8 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
>>  }
>>
>>  /* Called with rcu_read_lock held. */
>> -static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
>> -                           bool *read_only)
>> +static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
>> +                               ram_addr_t *ram_addr, bool *read_only)
>>  {
>>      MemoryRegion *mr;
>>      hwaddr xlat;
>> @@ -474,8 +474,17 @@ static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
>>          return false;
>>      }
>>
>> -    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
>> -    *read_only = !writable || mr->readonly;
>> +    if (vaddr) {
>> +        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
>> +    }
>> +
>> +    if (ram_addr) {
>> +        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
>> +    }
>> +
>> +    if (read_only) {
>> +        *read_only = !writable || mr->readonly;
>> +    }
>>
>>      return true;
>>  }
>> @@ -485,7 +494,6 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>>      VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
>>      VFIOContainer *container = giommu->container;
>>      hwaddr iova = iotlb->iova + giommu->iommu_offset;
>> -    bool read_only;
>>      void *vaddr;
>>      int ret;
>>
>> @@ -501,7 +509,9 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>>      rcu_read_lock();
>>
>>      if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
>> -        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
>> +        bool read_only;
>> +
>> +        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
>>              goto out;
>>          }
>>          /*
>> @@ -899,11 +909,84 @@ err_out:
>>      return ret;
>>  }
>>
>> +static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
>> +{
>> +    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, dirty_notify);
>> +    VFIOContainer *container = giommu->container;
>> +    hwaddr iova = iotlb->iova + giommu->iommu_offset;
>> +    ram_addr_t translated_addr;
>> +
>> +    trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
>> +
>> +    if (iotlb->target_as != &address_space_memory) {
>> +        error_report("Wrong target AS \"%s\", only system memory is allowed",
>> +                     iotlb->target_as->name ? iotlb->target_as->name : "none");
>> +        return;
>> +    }
>> +
>> +    rcu_read_lock();
>> +
>> +    if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
>> +        int ret;
>> +
>> +        ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
>> +                                    translated_addr);
>> +        if (ret) {
>> +            error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
>> +                         "0x%"HWADDR_PRIx") = %d (%m)",
>> +                         container, iova,
>> +                         iotlb->addr_mask + 1, ret);
>> +        }
>> +    }
>> +
>> +    rcu_read_unlock();
>> +}
>> +
>>  static int vfio_sync_dirty_bitmap(VFIOContainer *container,
>>                                    MemoryRegionSection *section)
>>  {
>>      ram_addr_t ram_addr;
>>
>> +    if (memory_region_is_iommu(section->mr)) {
>> +        VFIOGuestIOMMU *giommu;
>> +        int ret = 0;
>> +
>> +        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
>> +            if (MEMORY_REGION(giommu->iommu) == section->mr &&
>> +                giommu->n.start == section->offset_within_region) {
>> +                Int128 llend;
>> +                Error *err = NULL;
>> +                int idx = memory_region_iommu_attrs_to_index(giommu->iommu,
>> +                                                      MEMTXATTRS_UNSPECIFIED);
>> +
>> +                llend = int128_add(int128_make64(section->offset_within_region),
>> +                                   section->size);
>> +                llend = int128_sub(llend, int128_one());
>> +
>> +                iommu_notifier_init(&giommu->dirty_notify,
>> +                                    vfio_iommu_map_dirty_notify,
>> +                                    IOMMU_NOTIFIER_MAP,
>> +                                    section->offset_within_region,
>> +                                    int128_get64(llend),
>> +                                    idx);
>> +                ret = memory_region_register_iommu_notifier(section->mr,
>> +                                                   &giommu->dirty_notify, &err);
>> +                if (ret) {
>> +                    error_report_err(err);
>> +                    break;
>> +                }
>> +
>> +                memory_region_iommu_replay(giommu->iommu,
>> +                                           &giommu->dirty_notify);
>> +
>> +                memory_region_unregister_iommu_notifier(section->mr,
>> +                                                        &giommu->dirty_notify);
>
>
> Is it necessary to do the register/unregister? It seemed to me that
> perhaps we could do a replay independent of those.
>

Earlier I thought that to do a replay we need to register. But you are
right, I verified that replay works without registering.

> I'd also be tempted to move dirty_notify to a temporary object rather
> than store it on the giommu for such a brief usage, i.e. define:
>
> struct giommu_dirty_notifier {
>     IOMMUNotifier n;
>     VFIOGuestIOMMU *giommu;
> };
>
> struct giommu_dirty_notifier gdn = { .giommu = giommu };
>
> iommu_notifier_init(&gdn.n, ...);
>
> memory_region_iommu_replay(giommu->iommu, &gdn.n);
> ...
>
> struct giommu_dirty_notifier *gdn = container_of(n, struct giommu_dirty_notifier, n);
> VFIOGuestIOMMU *giommu = gdn->giommu;
>
> It's nice that we remove the extra bloat of the list/tree entirely with
> this approach. Thanks,
>

Thanks for your suggestion. Changing as you suggested above.

Thanks,
Kirti
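
Kirti's observation that replay works without registration follows from how the default replay path behaves: it walks the notifier's range and invokes the callback directly for every currently valid translation, without consulting any registration list. Below is a conceptual, standalone sketch of that idea; the types, the toy translate() that maps every other 4K page, and the fixed granularity are stand-ins for QEMU's internals, not its actual implementation.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t hwaddr;

enum { IOMMU_NONE = 0, IOMMU_RW = 3 };

typedef struct IOMMUTLBEntry {
    hwaddr iova;
    hwaddr translated_addr;
    hwaddr addr_mask;
    int perm;
} IOMMUTLBEntry;

typedef struct IOMMUNotifier IOMMUNotifier;
struct IOMMUNotifier {
    void (*notify)(IOMMUNotifier *n, IOMMUTLBEntry *entry);
    hwaddr start, end;
};

/* Toy "IOMMU": pretends every other 4K page is mapped. */
static IOMMUTLBEntry translate(hwaddr addr)
{
    IOMMUTLBEntry e = {
        .iova = addr & ~0xfffULL,
        .translated_addr = addr & ~0xfffULL,
        .addr_mask = 0xfff,
        .perm = ((addr >> 12) & 1) ? IOMMU_RW : IOMMU_NONE,
    };
    return e;
}

/* Conceptual replay loop: no registration involved, the notifier is
 * simply invoked for each valid mapping in its range. */
static void replay(IOMMUNotifier *n)
{
    const hwaddr granularity = 0x1000;
    for (hwaddr addr = n->start; addr <= n->end; addr += granularity) {
        IOMMUTLBEntry e = translate(addr);
        if (e.perm != IOMMU_NONE) {
            n->notify(n, &e);
        }
    }
}

static void notify(IOMMUNotifier *n, IOMMUTLBEntry *e)
{
    printf("mapped: iova 0x%llx size 0x%llx\n",
           (unsigned long long)e->iova,
           (unsigned long long)(e->addr_mask + 1));
}

int main(void)
{
    IOMMUNotifier n = { .notify = notify, .start = 0, .end = 0x7fff };
    replay(&n);
    return 0;
}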
When vIOMMU is enabled, register MAP notifier from log_sync when all
devices in container are in stop and copy phase of migration. Call replay
and get dirty pages from notifier callback.

Suggested-by: Alex Williamson <alex.williamson@redhat.com>
Signed-off-by: Kirti Wankhede <kwankhede@nvidia.com>
---
 hw/vfio/common.c              | 95 ++++++++++++++++++++++++++++++++++++++++---
 hw/vfio/trace-events          |  1 +
 include/hw/vfio/vfio-common.h |  1 +
 3 files changed, 91 insertions(+), 6 deletions(-)

diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 2634387df948..98c2b1f9b190 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -442,8 +442,8 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
 }

 /* Called with rcu_read_lock held. */
-static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
-                           bool *read_only)
+static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
+                               ram_addr_t *ram_addr, bool *read_only)
 {
     MemoryRegion *mr;
     hwaddr xlat;
@@ -474,8 +474,17 @@ static bool vfio_get_vaddr(IOMMUTLBEntry *iotlb, void **vaddr,
         return false;
     }

-    *vaddr = memory_region_get_ram_ptr(mr) + xlat;
-    *read_only = !writable || mr->readonly;
+    if (vaddr) {
+        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
+    }
+
+    if (ram_addr) {
+        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
+    }
+
+    if (read_only) {
+        *read_only = !writable || mr->readonly;
+    }

     return true;
 }
@@ -485,7 +494,6 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
     VFIOContainer *container = giommu->container;
     hwaddr iova = iotlb->iova + giommu->iommu_offset;
-    bool read_only;
     void *vaddr;
     int ret;

@@ -501,7 +509,9 @@ static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
     rcu_read_lock();

     if ((iotlb->perm & IOMMU_RW) != IOMMU_NONE) {
-        if (!vfio_get_vaddr(iotlb, &vaddr, &read_only)) {
+        bool read_only;
+
+        if (!vfio_get_xlat_addr(iotlb, &vaddr, NULL, &read_only)) {
             goto out;
         }
         /*
@@ -899,11 +909,84 @@ err_out:
     return ret;
 }

+static void vfio_iommu_map_dirty_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
+{
+    VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, dirty_notify);
+    VFIOContainer *container = giommu->container;
+    hwaddr iova = iotlb->iova + giommu->iommu_offset;
+    ram_addr_t translated_addr;
+
+    trace_vfio_iommu_map_dirty_notify(iova, iova + iotlb->addr_mask);
+
+    if (iotlb->target_as != &address_space_memory) {
+        error_report("Wrong target AS \"%s\", only system memory is allowed",
+                     iotlb->target_as->name ? iotlb->target_as->name : "none");
+        return;
+    }
+
+    rcu_read_lock();
+
+    if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
+        int ret;
+
+        ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
+                                    translated_addr);
+        if (ret) {
+            error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
+                         "0x%"HWADDR_PRIx") = %d (%m)",
+                         container, iova,
+                         iotlb->addr_mask + 1, ret);
+        }
+    }
+
+    rcu_read_unlock();
+}
+
 static int vfio_sync_dirty_bitmap(VFIOContainer *container,
                                   MemoryRegionSection *section)
 {
     ram_addr_t ram_addr;

+    if (memory_region_is_iommu(section->mr)) {
+        VFIOGuestIOMMU *giommu;
+        int ret = 0;
+
+        QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
+            if (MEMORY_REGION(giommu->iommu) == section->mr &&
+                giommu->n.start == section->offset_within_region) {
+                Int128 llend;
+                Error *err = NULL;
+                int idx = memory_region_iommu_attrs_to_index(giommu->iommu,
+                                                      MEMTXATTRS_UNSPECIFIED);
+
+                llend = int128_add(int128_make64(section->offset_within_region),
+                                   section->size);
+                llend = int128_sub(llend, int128_one());
+
+                iommu_notifier_init(&giommu->dirty_notify,
+                                    vfio_iommu_map_dirty_notify,
+                                    IOMMU_NOTIFIER_MAP,
+                                    section->offset_within_region,
+                                    int128_get64(llend),
+                                    idx);
+                ret = memory_region_register_iommu_notifier(section->mr,
+                                                   &giommu->dirty_notify, &err);
+                if (ret) {
+                    error_report_err(err);
+                    break;
+                }
+
+                memory_region_iommu_replay(giommu->iommu,
+                                           &giommu->dirty_notify);
+
+                memory_region_unregister_iommu_notifier(section->mr,
+                                                        &giommu->dirty_notify);
+                break;
+            }
+        }
+        return ret;
+    }
+
     ram_addr = memory_region_get_ram_addr(section->mr) +
                section->offset_within_region;

diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events
index c3b35aa2cce8..d9cb8998a228 100644
--- a/hw/vfio/trace-events
+++ b/hw/vfio/trace-events
@@ -163,3 +163,4 @@ vfio_load_device_config_state(const char *name) " (%s)"
 vfio_load_state(const char *name, uint64_t data) " (%s) data 0x%"PRIx64
 vfio_load_state_device_data(const char *name, uint64_t data_offset, uint64_t data_size) " (%s) Offset 0x%"PRIx64" size 0x%"PRIx64
 vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64
+vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64
diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h
index b1c1b18fd228..92872fae59ee 100644
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -99,6 +99,7 @@ typedef struct VFIOGuestIOMMU {
     IOMMUMemoryRegion *iommu;
     hwaddr iommu_offset;
     IOMMUNotifier n;
+    IOMMUNotifier dirty_notify;
     QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
 } VFIOGuestIOMMU;
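
The refactoring of vfio_get_vaddr() into vfio_get_xlat_addr() rests on making every output optional: the DMA-map path asks for the host vaddr and permission, while the dirty-bitmap path only needs the ram_addr translation. A standalone sketch of that NULL-guarded out-parameter pattern follows; the values and backing array are toys, not the real translation logic.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the translation helper: each output pointer is
 * optional, so callers pass NULL for results they do not need. */
static bool get_xlat_addr(uint64_t xlat, void **vaddr,
                          uint64_t *ram_addr, bool *read_only)
{
    static char backing[0x100000];      /* pretend RAM block */

    if (vaddr) {
        *vaddr = backing + xlat;        /* host pointer at offset */
    }
    if (ram_addr) {
        *ram_addr = 0x40000000 + xlat;  /* toy ram_addr_t offset */
    }
    if (read_only) {
        *read_only = false;
    }
    return true;
}

int main(void)
{
    void *vaddr;
    uint64_t ram_addr;
    bool ro;

    /* DMA-map path: needs the host virtual address and permission. */
    if (get_xlat_addr(0x1000, &vaddr, NULL, &ro)) {
        printf("vaddr %p read_only %d\n", vaddr, ro);
    }

    /* Dirty-bitmap path: only the ram_addr translation matters. */
    if (get_xlat_addr(0x1000, NULL, &ram_addr, NULL)) {
        printf("ram_addr 0x%llx\n", (unsigned long long)ram_addr);
    }
    return 0;
}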