--- a/hw/vfio/container.c
+++ b/hw/vfio/container.c
@@ -32,6 +32,7 @@
#include "trace.h"
#include "qapi/error.h"
#include "migration/cpr.h"
+#include "migration/blocker.h"
#include "pci.h"
VFIOGroupList vfio_group_list =
@@ -132,6 +133,8 @@ static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
int ret;
Error *local_err = NULL;
+ assert(!container->reused);
+
if (iotlb && vfio_devices_all_dirty_tracking_started(bcontainer)) {
if (!vfio_devices_all_device_dirty_tracking(bcontainer) &&
bcontainer->dirty_pages_supported) {
@@ -183,12 +186,24 @@ static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
bcontainer);
struct vfio_iommu_type1_dma_map map = {
.argsz = sizeof(map),
- .flags = VFIO_DMA_MAP_FLAG_READ,
.vaddr = (__u64)(uintptr_t)vaddr,
.iova = iova,
.size = size,
};
+    /*
+     * Set the new vaddr for any mappings registered during cpr load;
+     * the reused flag is cleared in the container post_load handler.
+     */
+ if (container->reused) {
+ map.flags = VFIO_DMA_MAP_FLAG_VADDR;
+ if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map)) {
+ goto fail;
+ }
+ return 0;
+ }
+
+ map.flags = VFIO_DMA_MAP_FLAG_READ;
if (!readonly) {
map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
}
@@ -205,7 +220,11 @@ static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
return 0;
}
- error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
+fail:
+ error_report("vfio_dma_map %s (iova %lu, size %ld, va %p): %s",
+ (container->reused ? "VADDR" : ""), iova, size, vaddr,
+ strerror(errno));
+
return -errno;
}
@@ -689,8 +708,17 @@ static bool vfio_connect_container(VFIOGroup *group, AddressSpace *as,
group->container = container;
QLIST_INSERT_HEAD(&container->group_list, group, container_next);
- bcontainer->listener = vfio_memory_listener;
- memory_listener_register(&bcontainer->listener, bcontainer->space->as);
+    /*
+     * If reused, register the listener later, after all state that may
+     * affect regions and mapping boundaries has been loaded by cpr.
+     * The listener will then invoke its callback on each flat section
+     * and call vfio_dma_map to supply the new vaddr, and the calls
+     * will match the mappings remembered by the kernel.
+     */
+ if (!reused) {
+ bcontainer->listener = vfio_memory_listener;
+ memory_listener_register(&bcontainer->listener, bcontainer->space->as);
+ }
if (bcontainer->error) {
error_propagate_prepend(errp, bcontainer->error,
@@ -1002,6 +1030,13 @@ static bool vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
return false;
}
+ if (vbasedev->mdev) {
+ error_setg(&vbasedev->cpr_mdev_blocker,
+ "CPR does not support vfio mdev %s", vbasedev->name);
+ migrate_add_blocker_modes(&vbasedev->cpr_mdev_blocker, &error_fatal,
+ MIG_MODE_CPR_TRANSFER, -1);
+ }
+
bcontainer = &group->container->bcontainer;
vbasedev->bcontainer = bcontainer;
QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
@@ -1018,6 +1053,7 @@ static void vfio_legacy_detach_device(VFIODevice *vbasedev)
QLIST_REMOVE(vbasedev, container_next);
vbasedev->bcontainer = NULL;
trace_vfio_detach_device(vbasedev->name, group->groupid);
+ migrate_del_blocker(&vbasedev->cpr_mdev_blocker);
vfio_put_base_device(vbasedev);
vfio_put_group(group);
}
--- a/hw/vfio/cpr-legacy.c
+++ b/hw/vfio/cpr-legacy.c
@@ -14,6 +14,21 @@
#include "migration/vmstate.h"
#include "qapi/error.h"
+static bool vfio_dma_unmap_vaddr_all(VFIOContainer *container, Error **errp)
+{
+ struct vfio_iommu_type1_dma_unmap unmap = {
+ .argsz = sizeof(unmap),
+ .flags = VFIO_DMA_UNMAP_FLAG_VADDR | VFIO_DMA_UNMAP_FLAG_ALL,
+ .iova = 0,
+ .size = 0,
+ };
+ if (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
+ error_setg_errno(errp, errno, "vfio_dma_unmap_vaddr_all");
+ return false;
+ }
+ return true;
+}
+
static bool vfio_cpr_supported(VFIOContainer *container, Error **errp)
{
if (!ioctl(container->fd, VFIO_CHECK_EXTENSION, VFIO_UPDATE_VADDR)) {
@@ -29,12 +44,27 @@ static bool vfio_cpr_supported(VFIOContainer *container, Error **errp)
}
}
+static int vfio_container_pre_save(void *opaque)
+{
+ VFIOContainer *container = opaque;
+ Error *err = NULL;
+
+ if (!vfio_dma_unmap_vaddr_all(container, &err)) {
+ error_report_err(err);
+ return -1;
+ }
+ return 0;
+}
+
static int vfio_container_post_load(void *opaque, int version_id)
{
VFIOContainer *container = opaque;
+ VFIOContainerBase *bcontainer = &container->bcontainer;
VFIOGroup *group;
VFIODevice *vbasedev;
+ bcontainer->listener = vfio_memory_listener;
+ memory_listener_register(&bcontainer->listener, bcontainer->space->as);
container->reused = false;
QLIST_FOREACH(group, &container->group_list, container_next) {
@@ -49,6 +79,8 @@ static const VMStateDescription vfio_container_vmstate = {
.name = "vfio-container",
.version_id = 0,
.minimum_version_id = 0,
+ .priority = MIG_PRI_LOW, /* Must happen after devices and groups */
+ .pre_save = vfio_container_pre_save,
.post_load = vfio_container_post_load,
.needed = cpr_needed_for_reuse,
.fields = (VMStateField[]) {
--- a/include/hw/vfio/vfio-common.h
+++ b/include/hw/vfio/vfio-common.h
@@ -143,6 +143,7 @@ typedef struct VFIODevice {
unsigned int flags;
VFIOMigration *migration;
Error *migration_blocker;
+ Error *cpr_mdev_blocker;
OnOffAuto pre_copy_dirty_page_tracking;
OnOffAuto device_dirty_page_tracking;
bool dirty_pages_supported;
@@ -310,6 +311,8 @@ int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova,
uint64_t size, ram_addr_t ram_addr, Error **errp);
+void vfio_listener_register(VFIOContainerBase *bcontainer);
+
/* Returns 0 on success, or a negative errno. */
bool vfio_device_get_name(VFIODevice *vbasedev, Error **errp);
void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp);
Preserve DMA mappings during cpr-transfer. In the container pre_save
handler, suspend the use of virtual addresses in DMA mappings with
VFIO_DMA_UNMAP_FLAG_VADDR, because guest RAM will be remapped at a
different VA after exec. DMA to already-mapped pages continues.

Because the vaddr is temporarily invalid, mediated devices cannot be
supported, so add a blocker for them. This restriction will not apply
to iommufd containers when CPR is added for them in a future patch.

In new QEMU, do not register the memory listener at device creation
time. Register it later, in the container post_load handler, after all
vmstate that may affect regions and mapping boundaries has been loaded.
The post_load registration will cause the listener to invoke its
callback on each flat section, and the calls will match the mappings
remembered by the kernel.

Modify vfio_dma_map (which is called by the listener) to pass the new
VA to the kernel using VFIO_DMA_MAP_FLAG_VADDR.

Signed-off-by: Steve Sistare <steven.sistare@oracle.com>
---
 hw/vfio/container.c           | 44 +++++++++++++++++++++++++++++++++++++++----
 hw/vfio/cpr-legacy.c          | 32 +++++++++++++++++++++++++++++++
 include/hw/vfio/vfio-common.h |  3 +++
 3 files changed, 75 insertions(+), 4 deletions(-)
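
For illustration, the kernel UAPI sequence that the pre_save handler and
the deferred listener replay rely on looks roughly like the sketch below.
This is not QEMU code: it assumes a legacy type1 container fd on a kernel
that advertises VFIO_UPDATE_VADDR (v5.12+), and the helper names
suspend_vaddrs/resume_vaddr are invented for the example.

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Before exec: invalidate the vaddr of every mapping; DMA continues. */
static int suspend_vaddrs(int container_fd)
{
    struct vfio_iommu_type1_dma_unmap unmap = {
        .argsz = sizeof(unmap),
        .flags = VFIO_DMA_UNMAP_FLAG_VADDR | VFIO_DMA_UNMAP_FLAG_ALL,
        .iova = 0,
        .size = 0,
    };

    if (!ioctl(container_fd, VFIO_CHECK_EXTENSION, VFIO_UPDATE_VADDR)) {
        return -ENOTSUP;
    }
    return ioctl(container_fd, VFIO_IOMMU_UNMAP_DMA, &unmap) ? -errno : 0;
}

/*
 * After exec: supply the new vaddr for one mapping the kernel remembered.
 * The iova and size must exactly match the original mapping.
 */
static int resume_vaddr(int container_fd, uint64_t iova, uint64_t size,
                        void *new_vaddr)
{
    struct vfio_iommu_type1_dma_map map = {
        .argsz = sizeof(map),
        .flags = VFIO_DMA_MAP_FLAG_VADDR,
        .vaddr = (uintptr_t)new_vaddr,
        .iova = iova,
        .size = size,
    };

    return ioctl(container_fd, VFIO_IOMMU_MAP_DMA, &map) ? -errno : 0;
}

In the patch, suspend_vaddrs corresponds to vfio_dma_unmap_vaddr_all in
the pre_save handler, and resume_vaddr to the container->reused branch of
vfio_legacy_dma_map, invoked once per flat section when the post_load
handler registers the memory listener.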