--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -2464,6 +2464,9 @@ static inline bool kvm_mem_is_private(struct kvm *kvm, gfn_t gfn)
#ifdef CONFIG_KVM_PRIVATE_MEM
int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
+int kvm_gmem_get_pfn_locked(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, kvm_pfn_t *pfn, int *max_order);
+
#else
static inline int kvm_gmem_get_pfn(struct kvm *kvm,
struct kvm_memory_slot *slot, gfn_t gfn,
@@ -2472,6 +2475,14 @@ static inline int kvm_gmem_get_pfn(struct kvm *kvm,
KVM_BUG_ON(1, kvm);
return -EIO;
}
+static inline int kvm_gmem_get_pfn_locked(struct kvm *kvm,
+ struct kvm_memory_slot *slot,
+ gfn_t gfn,
+ kvm_pfn_t *pfn, int *max_order)
+{
+ KVM_BUG_ON(1, kvm);
+ return -EIO;
+}
 #endif /* CONFIG_KVM_PRIVATE_MEM */
 
 #ifdef CONFIG_HAVE_KVM_ARCH_GMEM_PREPARE
--- a/virt/kvm/guest_memfd.c
+++ b/virt/kvm/guest_memfd.c
@@ -714,34 +714,66 @@ __kvm_gmem_get_pfn(struct file *file, struct kvm_memory_slot *slot,
return folio;
 }
 
-int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
- gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
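+/*
+ * On success, returns with *folio locked and a reference held; the folio
+ * is unlocked and released here if preparing it fails.
+ */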
+static int
+kvm_gmem_get_pfn_folio_locked(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, kvm_pfn_t *pfn, int *max_order,
+ struct folio **folio)
{
struct file *file = kvm_gmem_get_file(slot);
- struct folio *folio;
bool is_prepared = false;
 	int r = 0;
 
 	if (!file)
 		return -EFAULT;
 
-	folio = __kvm_gmem_get_pfn(file, slot, gfn, pfn, &is_prepared, max_order);
- if (IS_ERR(folio)) {
- r = PTR_ERR(folio);
+ *folio = __kvm_gmem_get_pfn(file, slot, gfn, pfn, &is_prepared, max_order);
+ if (IS_ERR(*folio)) {
+ r = PTR_ERR(*folio);
goto out;
 	}
 
 	if (!is_prepared)
-		r = kvm_gmem_prepare_folio(kvm, slot, gfn, folio);
+		r = kvm_gmem_prepare_folio(kvm, slot, gfn, *folio);
 
-	folio_unlock(folio);
- if (r < 0)
- folio_put(folio);
+ if (r) {
+ folio_unlock(*folio);
+ folio_put(*folio);
+	}
 
 out:
fput(file);
return r;
}
+
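+/*
+ * Like kvm_gmem_get_pfn(), except that on success the folio backing the
+ * pfn is returned locked; the caller is responsible for unlocking it.
+ */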
+int kvm_gmem_get_pfn_locked(struct kvm *kvm, struct kvm_memory_slot *slot,
+			    gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
+{
+	struct folio *folio;
+
+	return kvm_gmem_get_pfn_folio_locked(kvm, slot, gfn, pfn, max_order, &folio);
+}
+EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn_locked);
+
+int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
+ gfn_t gfn, kvm_pfn_t *pfn, int *max_order)
+{
+ struct folio *folio;
+ int r;
+
+ r = kvm_gmem_get_pfn_folio_locked(kvm, slot, gfn, pfn, max_order, &folio);
+ if (!r)
+ folio_unlock(folio);
+
+ return r;
+}
 EXPORT_SYMBOL_GPL(kvm_gmem_get_pfn);
 
 #ifdef CONFIG_KVM_GENERIC_PRIVATE_MEM
Create a new variant of kvm_gmem_get_pfn(), which retains the folio lock
if it returns successfully. This is needed in subsequent patches to
protect against races when checking whether a folio can be mapped by the
host.

Signed-off-by: Fuad Tabba <tabba@google.com>
---
 include/linux/kvm_host.h | 11 +++++++++++
 virt/kvm/guest_memfd.c   | 52 ++++++++++++++++++++++++++++++----------
 2 files changed, 53 insertions(+), 10 deletions(-)
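For context, a minimal usage sketch of the new API (illustrative only, not
part of this patch): a follow-up change could hold the folio lock across a
host-mappability check along these lines. Both example_get_mappable_pfn()
and kvm_gmem_is_mappable() are hypothetical names standing in for the
checks added later in this series:

#include <linux/kvm_host.h>
#include <linux/pagemap.h>

/* Hypothetical helper; the real check is introduced by later patches. */
bool kvm_gmem_is_mappable(struct kvm *kvm, gfn_t gfn);

static int example_get_mappable_pfn(struct kvm *kvm,
				    struct kvm_memory_slot *slot,
				    gfn_t gfn, kvm_pfn_t *pfn)
{
	struct folio *folio;
	int max_order;
	int r;

	/* On success, the folio backing *pfn is returned locked. */
	r = kvm_gmem_get_pfn_locked(kvm, slot, gfn, pfn, &max_order);
	if (r)
		return r;

	folio = pfn_folio(*pfn);

	/*
	 * The folio lock is held, so the answer to the mappability check
	 * cannot change under our feet until we unlock.
	 */
	if (!kvm_gmem_is_mappable(kvm, gfn))
		r = -EPERM;

	folio_unlock(folio);
	if (r)
		folio_put(folio);

	return r;
}

Performing the check while the folio lock is held is what closes the race
mentioned above: the folio's state cannot be changed between the check and
the point where the caller drops the lock.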