diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2407,6 +2407,11 @@ static inline void kvm_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
vcpu->run->memory_fault.flags |= KVM_MEMORY_EXIT_FLAG_PRIVATE;
}

+static inline bool kvm_can_access_gmem(struct kvm *kvm)
+{
+ return kvm->arch.vm_type == KVM_X86_SW_PROTECTED_VM;
+}
+
#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
static inline unsigned long kvm_get_memory_attributes(struct kvm *kvm, gfn_t gfn)
{
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3286,11 +3286,51 @@ static int __kvm_read_guest_page(struct kvm_memory_slot *slot, gfn_t gfn,
return 0;
}

+static int __kvm_read_guest_private_page(struct kvm *kvm,
+ struct kvm_memory_slot *memslot, gfn_t gfn,
+ void *data, int offset, int len)
+{
+ kvm_pfn_t pfn;
+ int r;
+ struct page *page;
+ void *kaddr;
+
+ if (!kvm_can_access_gmem(kvm))
+ return -EFAULT;
+
+ r = kvm_gmem_get_pfn(kvm, memslot, gfn, &pfn, NULL);
+
+ if (r < 0)
+ return -EFAULT;
+
+ page = pfn_to_page(pfn);
+ lock_page(page);
+ kaddr = page_address(page) + offset;
+ memcpy(data, kaddr, len);
+ unlock_page(page);
+ put_page(page);
+ return 0;
+}
+
+static int __kvm_vcpu_read_guest_private_page(struct kvm_vcpu *vcpu,
+ struct kvm_memory_slot *memslot, gfn_t gfn,
+ void *data, int offset, int len)
+{
+ if (!kvm_can_access_gmem(vcpu->kvm)) {
+ kvm_prepare_memory_fault_exit(vcpu, gfn_to_gpa(gfn) + offset, len, false,
+ false, true);
+ return -EFAULT;
+ }
+ return __kvm_read_guest_private_page(vcpu->kvm, memslot, gfn, data, offset, len);
+}
+
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len)
{
struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

+ if (kvm_mem_is_private(kvm, gfn))
+ return __kvm_read_guest_private_page(kvm, slot, gfn, data, offset, len);
return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_read_guest_page);
@@ -3300,6 +3340,8 @@ int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data,
{
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

+ if (kvm_mem_is_private(vcpu->kvm, gfn))
+ return __kvm_vcpu_read_guest_private_page(vcpu, slot, gfn, data, offset, len);
return __kvm_read_guest_page(slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_read_guest_page);
@@ -3390,11 +3432,52 @@ static int __kvm_write_guest_page(struct kvm *kvm,
return 0;
}

+static int __kvm_write_guest_private_page(struct kvm *kvm,
+ struct kvm_memory_slot *memslot, gfn_t gfn,
+ const void *data, int offset, int len)
+{
+ kvm_pfn_t pfn;
+ int r;
+ struct page *page;
+ void *kaddr;
+
+ if (!kvm_can_access_gmem(kvm))
+ return -EFAULT;
+
+ r = kvm_gmem_get_pfn(kvm, memslot, gfn, &pfn, NULL);
+
+ if (r < 0)
+ return -EFAULT;
+
+ page = pfn_to_page(pfn);
+ lock_page(page);
+ kaddr = page_address(page) + offset;
+ memcpy(kaddr, data, len);
+ unlock_page(page);
+ put_page(page);
+
+ return 0;
+}
+
+static int __kvm_vcpu_write_guest_private_page(struct kvm_vcpu *vcpu,
+ struct kvm_memory_slot *memslot, gfn_t gfn,
+ const void *data, int offset, int len)
+{
+ if (!kvm_can_access_gmem(vcpu->kvm)) {
+ kvm_prepare_memory_fault_exit(vcpu, gfn_to_gpa(gfn) + offset, len, true,
+ false, true);
+ return -EFAULT;
+ }
+ return __kvm_write_guest_private_page(vcpu->kvm, memslot, gfn, data, offset, len);
+}
+
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn,
const void *data, int offset, int len)
{
struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);

+ if (kvm_mem_is_private(kvm, gfn))
+ return __kvm_write_guest_private_page(kvm, slot, gfn, data, offset, len);
return __kvm_write_guest_page(kvm, slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_write_guest_page);
@@ -3404,6 +3487,8 @@ int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
{
struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);

+ if (kvm_mem_is_private(vcpu->kvm, gfn))
+ return __kvm_vcpu_write_guest_private_page(vcpu, slot, gfn, data, offset, len);
return __kvm_write_guest_page(vcpu->kvm, slot, gfn, data, offset, len);
}
EXPORT_SYMBOL_GPL(kvm_vcpu_write_guest_page);
If KVM can access guest-private memory without causing a host-kernel
panic (currently only if the vm type is KVM_X86_SW_PROTECTED_VM), allow
`kvm_{read,write}_guest` to access gfns that are set to "private". If
KVM cannot access guest-private memory (say, because it is running a
TDX VM), prepare a KVM_EXIT_MEMORY_FAULT (if possible) and return
-EFAULT.

KVM can only prepare the memory fault exit inside the
`kvm_vcpu_{read,write}_guest` variants, as it needs a vcpu reference to
assign the exit reason to.

KVM accesses guest-private memory via kernel virtual addresses/the
direct map. In the special case of guest_memfd, it does not have to
worry about gfn->pfn mappings being invalidated, since guest_memfd
pages are immovable.

Signed-off-by: Patrick Roy <roypat@amazon.co.uk>
---
 include/linux/kvm_host.h |  5 +++
 virt/kvm/kvm_main.c      | 85 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 90 insertions(+)

base-commit: 771df9ffadb8204e61d3e98f36c5067102aab78f
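
Not part of the patch, purely for illustration: below is a minimal
sketch of how a VMM might consume the KVM_EXIT_MEMORY_FAULT that the
vcpu variants above prepare when KVM cannot access guest-private
memory. It assumes the -EFAULT from the read/write path is propagated
back out of KVM_RUN; the names vcpu_fd, run and
handle_private_access_fault() are invented for the example, while the
struct kvm_run memory_fault fields and KVM_MEMORY_EXIT_FLAG_PRIVATE are
existing uapi.

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: run one KVM_RUN iteration and report whether the vcpu hit a
 * guest-private range that KVM could not access on the guest's behalf.
 * Returns 1 if such a fault was seen, 0 otherwise. */
static int handle_private_access_fault(int vcpu_fd, struct kvm_run *run)
{
	if (ioctl(vcpu_fd, KVM_RUN, 0) == 0 || errno != EFAULT ||
	    run->exit_reason != KVM_EXIT_MEMORY_FAULT)
		return 0;	/* some other exit; not handled in this sketch */

	if (run->memory_fault.flags & KVM_MEMORY_EXIT_FLAG_PRIVATE) {
		/* KVM could not access the private range starting at
		 * run->memory_fault.gpa, of run->memory_fault.size bytes.
		 * A VMM could e.g. convert the range to shared via
		 * KVM_SET_MEMORY_ATTRIBUTES and re-enter the guest, or
		 * treat the access as fatal. */
		return 1;
	}
	return 0;
}

Whether the prepared exit actually reaches userspace like this depends
on the in-kernel caller of kvm_vcpu_{read,write}_guest_page returning
the -EFAULT all the way back out of the KVM_RUN ioctl.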