
[RFC KVM 17/27] kvm/isolation: improve mapping copy when mapping is already present

Message ID 1557758315-12667-18-git-send-email-alexandre.chartre@oracle.com (mailing list archive)
State New, archived
Series KVM Address Space Isolation

Commit Message

Alexandre Chartre May 13, 2019, 2:38 p.m. UTC
A mapping can already exist if a buffer was mapped in the KVM
address space and then freed without a corresponding request to
unmap it from the KVM address space. In that case, clear the
existing mapping before mapping the new buffer.

Also, if the new mapping is a subset of a larger range that is
already mapped, then remap the entire larger range.

Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
---
 arch/x86/kvm/isolation.c |   67 +++++++++++++++++++++++++++++++++++++++++++---
 1 files changed, 63 insertions(+), 4 deletions(-)
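
The decision tree implemented by kvm_copy_mapping() below is easier to see
outside the kernel. The following stand-alone program is a minimal userspace
sketch of the cases the commit message describes; struct range, map_range(),
clear_range() and the condition used to tell the "subset" case apart are all
illustrative stand-ins chosen for this sketch (the real code walks
kvm_range_mapping_list under kvm_range_mapping_lock and calls
kvm_copy_pgd_range() and kvm_clear_mapping()), not the actual implementation.

#include <stdio.h>
#include <stdbool.h>

struct range {
	unsigned long start;
	unsigned long end;	/* exclusive */
};

/* Stand-in for kvm_copy_pgd_range(): just report what would be mapped. */
static void map_range(unsigned long start, unsigned long end)
{
	printf("  map   [%#lx - %#lx)\n", start, end);
}

/* Stand-in for kvm_clear_mapping(): just report what would be cleared. */
static void clear_range(unsigned long start, unsigned long end)
{
	printf("  clear [%#lx - %#lx)\n", start, end);
}

/*
 * Decide how to map [start, end) when "existing" may already be mapped:
 *  1. nothing mapped at this address -> map the new range;
 *  2. new range starts inside a larger existing range ("subset")
 *     -> remap the whole known range, then map any tail beyond it;
 *  3. same start address already tracked (stale mapping left over from a
 *     buffer freed without an unmap) -> clear it, then map the new range.
 */
static void copy_mapping(const struct range *existing, unsigned long start,
			 unsigned long end)
{
	if (!existing || start < existing->start || start >= existing->end) {
		map_range(start, end);
		return;
	}

	if (start > existing->start) {
		/* Subset case: remap the larger, already-known range. */
		map_range(existing->start, existing->end);
		if (end > existing->end)
			/* New range extends past it: map the extra tail. */
			map_range(existing->end, end);
		return;
	}

	/* Stale mapping: clear it first, then map the new buffer. */
	clear_range(existing->start, existing->end);
	map_range(start, end);
}

int main(void)
{
	struct range existing = { 0x1000, 0x5000 };

	printf("subset, fully contained:\n");
	copy_mapping(&existing, 0x2000, 0x4000);

	printf("subset, extends past the existing range:\n");
	copy_mapping(&existing, 0x2000, 0x7000);

	printf("same start address, stale mapping:\n");
	copy_mapping(&existing, 0x1000, 0x3000);

	printf("nothing mapped at this address:\n");
	copy_mapping(&existing, 0x8000, 0x9000);

	return 0;
}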

Patch

diff --git a/arch/x86/kvm/isolation.c b/arch/x86/kvm/isolation.c
index e494a15..539e287 100644
--- a/arch/x86/kvm/isolation.c
+++ b/arch/x86/kvm/isolation.c
@@ -88,6 +88,9 @@  struct mm_struct kvm_mm = {
 DEFINE_STATIC_KEY_FALSE(kvm_isolation_enabled);
 EXPORT_SYMBOL(kvm_isolation_enabled);
 
+static void kvm_clear_mapping(void *ptr, size_t size,
+			      enum page_table_level level);
+
 /*
  * When set to true, KVM #VMExit handlers run in isolated address space
  * which maps only KVM required code and per-VM information instead of
@@ -721,6 +724,7 @@  static int kvm_copy_mapping(void *ptr, size_t size, enum page_table_level level)
 {
 	unsigned long addr = (unsigned long)ptr;
 	unsigned long end = addr + ((unsigned long)size);
+	unsigned long range_addr, range_end;
 	struct kvm_range_mapping *range_mapping;
 	bool subset;
 	int err;
@@ -728,22 +732,77 @@  static int kvm_copy_mapping(void *ptr, size_t size, enum page_table_level level)
 	BUG_ON(current->mm == &kvm_mm);
 	pr_debug("KERNMAP COPY addr=%px size=%lx level=%d\n", ptr, size, level);
 
-	range_mapping = kmalloc(sizeof(struct kvm_range_mapping), GFP_KERNEL);
-	if (!range_mapping)
-		return -ENOMEM;
+	mutex_lock(&kvm_range_mapping_lock);
+
+	/*
+	 * A mapping can already exist if the buffer was mapped and then
+	 * freed but there was no request to unmap it. We might also be
+	 * trying to map a subset of an already mapped buffer.
+	 */
+	range_mapping = kvm_get_range_mapping_locked(ptr, &subset);
+	if (range_mapping) {
+		if (subset) {
+			pr_debug("range %px/%lx/%d is a subset of %px/%lx/%d already mapped, remapping\n",
+				 ptr, size, level, range_mapping->ptr,
+				 range_mapping->size, range_mapping->level);
+			range_addr = (unsigned long)range_mapping->ptr;
+			range_end = range_addr +
+				((unsigned long)range_mapping->size);
+			err = kvm_copy_pgd_range(&kvm_mm, current->mm,
+						 range_addr, range_end,
+						 range_mapping->level);
+			if (end <= range_end) {
+				/*
+				 * We effectively have a subset, fully contained
+				 * in the superset. So we are done.
+				 */
+				mutex_unlock(&kvm_range_mapping_lock);
+				return err;
+			}
+			/*
+			 * The new range is larger than the existing mapped
+			 * range. So we need an extra mapping to map the end
+			 * of the range.
+			 */
+			addr = range_end;
+			range_mapping = NULL;
+			pr_debug("adding extra range %lx-%lx (%d)\n", addr,
+				 end, level);
+		} else {
+			pr_debug("range %px size=%lx level=%d already mapped, clearing\n",
+				 range_mapping->ptr, range_mapping->size,
+				 range_mapping->level);
+			kvm_clear_mapping(range_mapping->ptr,
+					  range_mapping->size,
+					  range_mapping->level);
+			list_del(&range_mapping->list);
+		}
+	}
+
+	if (!range_mapping) {
+		range_mapping = kmalloc(sizeof(struct kvm_range_mapping),
+		    GFP_KERNEL);
+		if (!range_mapping) {
+			mutex_unlock(&kvm_range_mapping_lock);
+			return -ENOMEM;
+		}
+		INIT_LIST_HEAD(&range_mapping->list);
+	}
 
 	err = kvm_copy_pgd_range(&kvm_mm, current->mm, addr, end, level);
 	if (err) {
+		mutex_unlock(&kvm_range_mapping_lock);
 		kfree(range_mapping);
 		return err;
 	}
 
-	INIT_LIST_HEAD(&range_mapping->list);
 	range_mapping->ptr = ptr;
 	range_mapping->size = size;
 	range_mapping->level = level;
 	list_add(&range_mapping->list, &kvm_range_mapping_list);
 
+	mutex_unlock(&kvm_range_mapping_lock);
+
 	return 0;
 }
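
For reference, kvm_get_range_mapping_locked() and kvm_clear_mapping() are
introduced earlier in the series; this patch only relies on the lookup
returning the tracked mapping that covers the requested address and
reporting, via the subset flag, whether that mapping is larger than an exact
match. The stand-alone sketch below models that assumed behaviour with a
plain array instead of kvm_range_mapping_list; the names and the subset test
are illustrative guesses, not the series' actual code.

#include <stdio.h>
#include <stdbool.h>

struct tracked_mapping {
	unsigned long start;
	unsigned long size;
};

/*
 * Model of the lookup: return the tracked mapping containing addr, or NULL
 * if there is none.  *subset is set when addr lies strictly inside a
 * mapping rather than at its start, i.e. the request is covered by a larger
 * existing mapping and the caller should remap that whole known range
 * instead of just the requested one.
 */
static struct tracked_mapping *
find_mapping(struct tracked_mapping *table, size_t count,
	     unsigned long addr, bool *subset)
{
	size_t i;

	for (i = 0; i < count; i++) {
		unsigned long start = table[i].start;
		unsigned long end = start + table[i].size;

		if (addr >= start && addr < end) {
			*subset = (addr != start);
			return &table[i];
		}
	}

	*subset = false;
	return NULL;
}

int main(void)
{
	struct tracked_mapping table[] = {
		{ 0x1000, 0x4000 },	/* covers [0x1000 - 0x5000) */
	};
	struct tracked_mapping *m;
	bool subset;

	m = find_mapping(table, 1, 0x2000, &subset);
	printf("0x2000: %s, subset=%d\n", m ? "found" : "not found", subset);

	m = find_mapping(table, 1, 0x8000, &subset);
	printf("0x8000: %s, subset=%d\n", m ? "found" : "not found", subset);

	return 0;
}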