@@ -133,6 +133,8 @@ static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
     int ret;
     Error *local_err = NULL;
 
+    assert(!container->cpr.reused);
+
     if (iotlb && vfio_devices_all_dirty_tracking_started(bcontainer)) {
         if (!vfio_devices_all_device_dirty_tracking(bcontainer) &&
             bcontainer->dirty_pages_supported) {
@@ -688,8 +690,17 @@ static bool vfio_connect_container(VFIOGroup *group, AddressSpace *as,
     }
     group_was_added = true;
 
-    bcontainer->listener = vfio_memory_listener;
-    memory_listener_register(&bcontainer->listener, bcontainer->space->as);
+    /*
+     * If reused, register the listener later, in the container post_load
+     * handler, after all state that may affect regions and mapping
+     * boundaries has been loaded.  Registration will invoke the listener
+     * callback on each flat section, and the resulting dma_map calls will
+     * supply the new vaddr and match the mappings remembered by the kernel.
+     */
+    if (!cpr_reused) {
+        bcontainer->listener = vfio_memory_listener;
+        memory_listener_register(&bcontainer->listener, bcontainer->space->as);
+    }
 
     if (bcontainer->error) {
         error_propagate_prepend(errp, bcontainer->error,
@@ -14,6 +14,7 @@
#include "migration/migration.h"
#include "migration/vmstate.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
static bool vfio_dma_unmap_vaddr_all(VFIOContainer *container, Error **errp)
{
@@ -30,6 +31,36 @@ static bool vfio_dma_unmap_vaddr_all(VFIOContainer *container, Error **errp)
     return true;
 }
 
+/*
+ * Set the new @vaddr for any mappings registered during cpr load.  The
+ * normal dma_map handler is restored when post_load clears cpr.reused.
+ */
+static int vfio_legacy_cpr_dma_map(const VFIOContainerBase *bcontainer,
+                                   hwaddr iova, ram_addr_t size, void *vaddr,
+                                   bool readonly)
+{
+    const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+                                                  bcontainer);
+    struct vfio_iommu_type1_dma_map map = {
+        .argsz = sizeof(map),
+        .flags = VFIO_DMA_MAP_FLAG_VADDR,
+        .vaddr = (__u64)(uintptr_t)vaddr,
+        .iova = iova,
+        .size = size,
+    };
+
+    assert(container->cpr.reused);
+
+    if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map)) {
+        error_report("vfio_legacy_cpr_dma_map (iova %" PRIu64 ", size %"
+                     PRIu64 ", va %p): %s", iova, (uint64_t)size, vaddr,
+                     strerror(errno));
+        return -errno;
+    }
+
+    return 0;
+}
+
 static bool vfio_cpr_supported(VFIOContainer *container, Error **errp)
 {
@@ -61,12 +92,20 @@ static int vfio_container_pre_save(void *opaque)
 static int vfio_container_post_load(void *opaque, int version_id)
 {
     VFIOContainer *container = opaque;
+    VFIOContainerBase *bcontainer = &container->bcontainer;
+    VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
     VFIOGroup *group;
     VFIODevice *vbasedev;
 
+    bcontainer->listener = vfio_memory_listener;
+    memory_listener_register(&bcontainer->listener, bcontainer->space->as);
+
+    /* Restore the original dma_map function, diverted during incoming CPR */
+    vioc->dma_map = vfio_legacy_dma_map;
+
     container->cpr.reused = false;
 
     QLIST_FOREACH(group, &container->group_list, container_next) {
         QLIST_FOREACH(vbasedev, &group->device_list, next) {
             vbasedev->cpr.reused = false;
         }
@@ -78,6 +117,7 @@ static const VMStateDescription vfio_container_vmstate = {
     .name = "vfio-container",
     .version_id = 0,
     .minimum_version_id = 0,
+    .priority = MIG_PRI_LOW,  /* Must happen after devices and groups */
     .pre_save = vfio_container_pre_save,
     .post_load = vfio_container_post_load,
     .needed = cpr_needed_for_reuse,
@@ -102,6 +142,13 @@ bool vfio_legacy_cpr_register_container(VFIOContainer *container, Error **errp)
     vmstate_register(NULL, -1, &vfio_container_vmstate, container);
 
+    /* During incoming CPR, divert calls to dma_map. */
+    if (container->cpr.reused) {
+        VFIOIOMMUClass *vioc = VFIO_IOMMU_GET_CLASS(bcontainer);
+
+        vioc->dma_map = vfio_legacy_cpr_dma_map;
+    }
+
     return true;
 }
In new QEMU, do not register the memory listener at device creation time.
Register it later, in the container post_load handler, after all vmstate
that may affect regions and mapping boundaries has been loaded.  The
post_load registration will cause the listener to invoke its callback on
each flat section, and the calls will match the mappings remembered by the
kernel.  The listener calls a special dma_map handler that passes the new
VA of each section to the kernel using VFIO_DMA_MAP_FLAG_VADDR.  Restore
the normal handler at the end.

Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
---
 hw/vfio/container.c  | 15 +++++++++++++--
 hw/vfio/cpr-legacy.c | 47 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 60 insertions(+), 2 deletions(-)
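
For reviewers unfamiliar with the kernel interface used above, the sketch
below shows the two-sided vaddr update that VFIO has provided since Linux
5.12 (VFIO_DMA_UNMAP_FLAG_VADDR / VFIO_DMA_MAP_FLAG_VADDR).  It is not part
of the patch, and the helper names and the bare container fd are
illustrative assumptions; the patch only issues the map side, in
vfio_legacy_cpr_dma_map() above, because the unmap side ran in old QEMU
before the exec.

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Old QEMU, before exec: detach the vaddr; the IOVA mapping survives. */
static int invalidate_vaddr(int container_fd, uint64_t iova, uint64_t size)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = VFIO_DMA_UNMAP_FLAG_VADDR,
        .iova = iova,
        .size = size,
    };

    return ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap) ? -errno : 0;
}

/* New QEMU, post_load: supply the new vaddr for the same IOVA range. */
static int update_vaddr(int container_fd, uint64_t iova, uint64_t size,
                        void *new_vaddr)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_VADDR,
        .vaddr = (uint64_t)(uintptr_t)new_vaddr,
        .iova = iova,
        .size = size,
    };

    return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map) ? -errno : 0;
}

The IOVA mappings, and therefore device DMA, stay live for the whole window
between the two calls; only vaddr-dependent operations wait until the new
vaddr is supplied.  That is what lets CPR exec a new QEMU without quiescing
guest DMA.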