Message ID | 20240710234222.2333120-5-jthoughton@google.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | KVM: Post-copy live migration for guest_memfd | expand |
On Wed, Jul 10, 2024 at 4:42 PM James Houghton <jthoughton@google.com> wrote: > > Add gfn_has_userfault() that (1) checks that KVM Userfault is enabled, > and (2) that our particular gfn is a userfault gfn. > > Check gfn_has_userfault() as part of __gfn_to_hva_many to prevent > gfn->hva translations for userfault gfns. > > Signed-off-by: James Houghton <jthoughton@google.com> > --- > include/linux/kvm_host.h | 12 ++++++++++++ > virt/kvm/kvm_main.c | 3 +++ > 2 files changed, 15 insertions(+) > > diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h > index c1eb59a3141b..4cca896fb44a 100644 > --- a/include/linux/kvm_host.h > +++ b/include/linux/kvm_host.h > @@ -140,6 +140,7 @@ static inline bool is_noslot_pfn(kvm_pfn_t pfn) > > #define KVM_HVA_ERR_BAD (PAGE_OFFSET) > #define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE) > +#define KVM_HVA_ERR_USERFAULT (PAGE_OFFSET + 2 * PAGE_SIZE) > > static inline bool kvm_is_error_hva(unsigned long addr) > { > @@ -2493,4 +2494,15 @@ static inline bool kvm_userfault_enabled(struct kvm *kvm) > #endif > } > > +static inline bool gfn_has_userfault(struct kvm *kvm, gfn_t gfn) > +{ > +#ifdef CONFIG_KVM_USERFAULT > + return kvm_userfault_enabled(kvm) && > + (kvm_get_memory_attributes(kvm, gfn) & > + KVM_MEMORY_ATTRIBUTE_USERFAULT); > +#else > + return false; > +#endif > +} > + > #endif > diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c > index ffa452a13672..758deb90a050 100644 > --- a/virt/kvm/kvm_main.c > +++ b/virt/kvm/kvm_main.c > @@ -2686,6 +2686,9 @@ static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t > if (memslot_is_readonly(slot) && write) > return KVM_HVA_ERR_RO_BAD; > > + if (gfn_has_userfault(slot->kvm, gfn)) > + return KVM_HVA_ERR_USERFAULT; You missed the "many" part :) Speaking of, to do this you'll need to convert all callers that pass in nr_pages to actually set the number of pages they need. 
Today KVM just checks from gfn to the end of the slot and returns the total number of pages via nr_pages. i.e., we could end up checking (and async fetching) the entire slot! > + > if (nr_pages) > *nr_pages = slot->npages - (gfn - slot->base_gfn); > > -- > 2.45.2.993.g49e7a77208-goog >
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c1eb59a3141b..4cca896fb44a 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -140,6 +140,7 @@ static inline bool is_noslot_pfn(kvm_pfn_t pfn) #define KVM_HVA_ERR_BAD (PAGE_OFFSET) #define KVM_HVA_ERR_RO_BAD (PAGE_OFFSET + PAGE_SIZE) +#define KVM_HVA_ERR_USERFAULT (PAGE_OFFSET + 2 * PAGE_SIZE) static inline bool kvm_is_error_hva(unsigned long addr) { @@ -2493,4 +2494,15 @@ static inline bool kvm_userfault_enabled(struct kvm *kvm) #endif } +static inline bool gfn_has_userfault(struct kvm *kvm, gfn_t gfn) +{ +#ifdef CONFIG_KVM_USERFAULT + return kvm_userfault_enabled(kvm) && + (kvm_get_memory_attributes(kvm, gfn) & + KVM_MEMORY_ATTRIBUTE_USERFAULT); +#else + return false; +#endif +} + #endif diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index ffa452a13672..758deb90a050 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2686,6 +2686,9 @@ static unsigned long __gfn_to_hva_many(const struct kvm_memory_slot *slot, gfn_t if (memslot_is_readonly(slot) && write) return KVM_HVA_ERR_RO_BAD; + if (gfn_has_userfault(slot->kvm, gfn)) + return KVM_HVA_ERR_USERFAULT; + if (nr_pages) *nr_pages = slot->npages - (gfn - slot->base_gfn);
Add gfn_has_userfault() that (1) checks that KVM Userfault is enabled, and (2) that our particular gfn is a userfault gfn. Check gfn_has_userfault() as part of __gfn_to_hva_many to prevent gfn->hva translations for userfault gfns. Signed-off-by: James Houghton <jthoughton@google.com> --- include/linux/kvm_host.h | 12 ++++++++++++ virt/kvm/kvm_main.c | 3 +++ 2 files changed, 15 insertions(+)