| Message ID | 20230914015531.1419405-3-seanjc@google.com (mailing list archive) |
|---|---|
| State | Superseded |
| Series | KVM: guest_memfd() and per-page attributes |
| Context | Check | Description |
|---|---|---|
| conchuod/cover_letter | success | Series has a cover letter |
| conchuod/tree_selection | success | Guessed tree name to be for-next at HEAD 0bb80ecc33a8 |
| conchuod/fixes_present | success | Fixes tag not required for -next series |
| conchuod/maintainers_pattern | success | MAINTAINERS pattern errors before the patch: 5 and now 5 |
| conchuod/verify_signedoff | success | Signed-off-by tag matches author and committer |
| conchuod/kdoc | success | Errors and warnings before: 4 this patch: 4 |
| conchuod/build_rv64_clang_allmodconfig | success | Errors and warnings before: 9 this patch: 9 |
| conchuod/module_param | success | Was 0 now: 0 |
| conchuod/build_rv64_gcc_allmodconfig | success | Errors and warnings before: 9 this patch: 9 |
| conchuod/build_rv32_defconfig | success | Build OK |
| conchuod/dtb_warn_rv64 | success | Errors and warnings before: 25 this patch: 25 |
| conchuod/header_inline | success | No static functions without inline keyword in header files |
| conchuod/checkpatch | success | total: 0 errors, 0 warnings, 0 checks, 204 lines checked |
| conchuod/build_rv64_nommu_k210_defconfig | success | Build OK |
| conchuod/verify_fixes | success | No Fixes tag |
| conchuod/build_rv64_nommu_virt_defconfig | success | Build OK |
On 9/14/2023 9:55 AM, Sean Christopherson wrote: > From: Chao Peng <chao.p.peng@linux.intel.com> > > Currently in mmu_notifier invalidate path, hva range is recorded and > then checked against by mmu_notifier_retry_hva() in the page fault > handling path. However, for the to be introduced private memory, a page > fault may not have a hva associated, checking gfn(gpa) makes more sense. > > For existing hva based shared memory, gfn is expected to also work. The > only downside is when aliasing multiple gfns to a single hva, the > current algorithm of checking multiple ranges could result in a much > larger range being rejected. Such aliasing should be uncommon, so the > impact is expected small. > > Suggested-by: Sean Christopherson <seanjc@google.com> > Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com> > Reviewed-by: Fuad Tabba <tabba@google.com> > Tested-by: Fuad Tabba <tabba@google.com> > [sean: convert vmx_set_apic_access_page_addr() to gfn-based API] > Signed-off-by: Sean Christopherson <seanjc@google.com> > --- > arch/x86/kvm/mmu/mmu.c | 10 ++++++---- > arch/x86/kvm/vmx/vmx.c | 11 +++++------ > include/linux/kvm_host.h | 33 +++++++++++++++++++++------------ > virt/kvm/kvm_main.c | 40 +++++++++++++++++++++++++++++++--------- > 4 files changed, 63 insertions(+), 31 deletions(-) > [...] > > -void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, > - unsigned long end) > +void kvm_mmu_invalidate_begin(struct kvm *kvm) > { > + lockdep_assert_held_write(&kvm->mmu_lock); > /* > * The count increase must become visible at unlock time as no > * spte can be established without taking the mmu_lock and > * count is also read inside the mmu_lock critical section. > */ > kvm->mmu_invalidate_in_progress++; > + > + if (likely(kvm->mmu_invalidate_in_progress == 1)) > + kvm->mmu_invalidate_range_start = INVALID_GPA; > +} > + > +void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end) > +{ > + lockdep_assert_held_write(&kvm->mmu_lock); > + > + WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress); > + > if (likely(kvm->mmu_invalidate_in_progress == 1)) { > kvm->mmu_invalidate_range_start = start; > kvm->mmu_invalidate_range_end = end; > @@ -771,6 +781,12 @@ void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, > } > } > > +static bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) > +{ > + kvm_mmu_invalidate_range_add(kvm, range->start, range->end); > + return kvm_unmap_gfn_range(kvm, range); > +} > + > static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, > const struct mmu_notifier_range *range) > { > @@ -778,7 +794,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, > const struct kvm_mmu_notifier_range hva_range = { > .start = range->start, > .end = range->end, > - .handler = kvm_unmap_gfn_range, > + .handler = kvm_mmu_unmap_gfn_range, > .on_lock = kvm_mmu_invalidate_begin, > .on_unlock = kvm_arch_guest_memory_reclaimed, > .flush_on_ret = true, > @@ -817,8 +833,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, > return 0; > } > > -void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, > - unsigned long end) > +void kvm_mmu_invalidate_end(struct kvm *kvm) > { > /* > * This sequence increase will notify the kvm page fault that > @@ -833,6 +848,13 @@ void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, > * in conjunction with the smp_rmb in mmu_invalidate_retry(). 
> */
> kvm->mmu_invalidate_in_progress--;
> +
> + /*
> + * Assert that at least one range must be added between start() and
> + * end(). Not adding a range isn't fatal, but it is a KVM bug.
> + */
> + WARN_ON_ONCE(kvm->mmu_invalidate_in_progress &&
> + kvm->mmu_invalidate_range_start == INVALID_GPA);

Should the check happen before the decrement of kvm->mmu_invalidate_in_progress? Otherwise, if KVM calls kvm_mmu_invalidate_begin() and then kvm_mmu_invalidate_end() without adding a range, the check will not take effect.

> }
>
> static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
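To make the ordering issue concrete, here is a minimal hypothetical call sequence (the helper names and fields are the ones from the patch; the buggy caller itself is invented purely for illustration) showing why the assertion, as written, cannot fire for a non-nested invalidation:

```c
/* Hypothetical buggy path, purely to illustrate Binbin's point. */
static void example_missing_range_add(struct kvm *kvm)
{
	write_lock(&kvm->mmu_lock);
	kvm_mmu_invalidate_begin(kvm);	/* in_progress: 0 -> 1, range_start = INVALID_GPA */
	/* ... kvm_mmu_invalidate_range_add() is never called ... */
	kvm_mmu_invalidate_end(kvm);	/* in_progress: 1 -> 0 *before* the WARN_ON_ONCE() */
	write_unlock(&kvm->mmu_lock);

	/*
	 * The assertion in kvm_mmu_invalidate_end() is
	 *
	 *	WARN_ON_ONCE(kvm->mmu_invalidate_in_progress &&
	 *		     kvm->mmu_invalidate_range_start == INVALID_GPA);
	 *
	 * and it runs after the decrement, so mmu_invalidate_in_progress is
	 * already 0 here, the && short-circuits, and the missing range goes
	 * unreported.
	 */
}
```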
On Thu, Sep 14, 2023, Binbin Wu wrote: > > On 9/14/2023 9:55 AM, Sean Christopherson wrote: > > +void kvm_mmu_invalidate_end(struct kvm *kvm) > > { > > /* > > * This sequence increase will notify the kvm page fault that > > @@ -833,6 +848,13 @@ void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, > > * in conjunction with the smp_rmb in mmu_invalidate_retry(). > > */ > > kvm->mmu_invalidate_in_progress--; > > + > > + /* > > + * Assert that at least one range must be added between start() and > > + * end(). Not adding a range isn't fatal, but it is a KVM bug. > > + */ > > + WARN_ON_ONCE(kvm->mmu_invalidate_in_progress && > > + kvm->mmu_invalidate_range_start == INVALID_GPA); > Should the check happen before the decrease of kvm->mmu_invalidate_in_progress? > Otherwise, KVM calls kvm_mmu_invalidate_begin(), then kvm_mmu_invalidate_end() > the check will not take effect. Indeed. I'm pretty sure I added this code, not sure what I was thinking. There's no reason to check mmu_invalidate_in_progress, it's not like KVM allows mmu_invalidate_in_progress to go negative. The comment is also a bit funky. I'll post a fixup patch to make it look like this (assuming I'm not forgetting a subtle reason for guarding the check with the in-progress flag): /* * Assert that at least one range was added between start() and end(). * Not adding a range isn't fatal, but it is a KVM bug. */ WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA); Regarding kvm->mmu_invalidate_in_progress, this would be a good opportunity to move the BUG_ON() into the common end(), e.g. as is, an end() without a start() from something other than the generic mmu_notifier would go unnoticed. And I _think_ we can replace the BUG_ON() with a KVM_BUG_ON() without putting the kernel at risk. E.g. diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index dd948276e5d6..54480655bcce 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -870,6 +870,7 @@ void kvm_mmu_invalidate_end(struct kvm *kvm) * in conjunction with the smp_rmb in mmu_invalidate_retry(). */ kvm->mmu_invalidate_in_progress--; + KVM_BUG_ON(kvm->mmu_invalidate_in_progress < 0, kvm); /* * Assert that at least one range was added between start() and end(). @@ -905,8 +906,6 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, */ if (wake) rcuwait_wake_up(&kvm->mn_memslots_update_rcuwait); - - BUG_ON(kvm->mmu_invalidate_in_progress < 0); } static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
On 2023-09-13 at 18:55:00 -0700, Sean Christopherson wrote: > From: Chao Peng <chao.p.peng@linux.intel.com> > > Currently in mmu_notifier invalidate path, hva range is recorded and > then checked against by mmu_notifier_retry_hva() in the page fault ^ Now it is mmu_invalidate_retry_hva(). > handling path. However, for the to be introduced private memory, a page > fault may not have a hva associated, checking gfn(gpa) makes more sense. > > For existing hva based shared memory, gfn is expected to also work. The > only downside is when aliasing multiple gfns to a single hva, the > current algorithm of checking multiple ranges could result in a much > larger range being rejected. Such aliasing should be uncommon, so the > impact is expected small. > > Suggested-by: Sean Christopherson <seanjc@google.com> > Signed-off-by: Chao Peng <chao.p.peng@linux.intel.com> > Reviewed-by: Fuad Tabba <tabba@google.com> > Tested-by: Fuad Tabba <tabba@google.com> > [sean: convert vmx_set_apic_access_page_addr() to gfn-based API] > Signed-off-by: Sean Christopherson <seanjc@google.com> > --- > arch/x86/kvm/mmu/mmu.c | 10 ++++++---- > arch/x86/kvm/vmx/vmx.c | 11 +++++------ > include/linux/kvm_host.h | 33 +++++++++++++++++++++------------ > virt/kvm/kvm_main.c | 40 +++++++++++++++++++++++++++++++--------- > 4 files changed, 63 insertions(+), 31 deletions(-) > > diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c > index e1d011c67cc6..0f0231d2b74f 100644 > --- a/arch/x86/kvm/mmu/mmu.c > +++ b/arch/x86/kvm/mmu/mmu.c > @@ -3056,7 +3056,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) > * > * There are several ways to safely use this helper: > * > - * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before > + * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before > * consuming it. In this case, mmu_lock doesn't need to be held during the > * lookup, but it does need to be held while checking the MMU notifier. > * > @@ -4358,7 +4358,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu, > return true; > > return fault->slot && > - mmu_invalidate_retry_hva(vcpu->kvm, fault->mmu_seq, fault->hva); > + mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn); > } > > static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) > @@ -6253,7 +6253,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) > > write_lock(&kvm->mmu_lock); > > - kvm_mmu_invalidate_begin(kvm, 0, -1ul); > + kvm_mmu_invalidate_begin(kvm); > + > + kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end); > > flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end); > > @@ -6266,7 +6268,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) > if (flush) > kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start); > > - kvm_mmu_invalidate_end(kvm, 0, -1ul); > + kvm_mmu_invalidate_end(kvm); > > write_unlock(&kvm->mmu_lock); > } > diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c > index 72e3943f3693..6e502ba93141 100644 > --- a/arch/x86/kvm/vmx/vmx.c > +++ b/arch/x86/kvm/vmx/vmx.c > @@ -6757,10 +6757,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu) > return; > > /* > - * Grab the memslot so that the hva lookup for the mmu_notifier retry > - * is guaranteed to use the same memslot as the pfn lookup, i.e. rely > - * on the pfn lookup's validation of the memslot to ensure a valid hva > - * is used for the retry check. 
> + * Explicitly grab the memslot using KVM's internal slot ID to ensure > + * KVM doesn't unintentionally grab a userspace memslot. It _should_ > + * be impossible for userspace to create a memslot for the APIC when > + * APICv is enabled, but paranoia won't hurt in this case. > */ > slot = id_to_memslot(slots, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT); > if (!slot || slot->flags & KVM_MEMSLOT_INVALID) > @@ -6785,8 +6785,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu) > return; > > read_lock(&vcpu->kvm->mmu_lock); > - if (mmu_invalidate_retry_hva(kvm, mmu_seq, > - gfn_to_hva_memslot(slot, gfn))) { > + if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) { > kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); > read_unlock(&vcpu->kvm->mmu_lock); > goto out; > diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h > index fb6c6109fdca..11d091688346 100644 > --- a/include/linux/kvm_host.h > +++ b/include/linux/kvm_host.h > @@ -787,8 +787,8 @@ struct kvm { > struct mmu_notifier mmu_notifier; > unsigned long mmu_invalidate_seq; > long mmu_invalidate_in_progress; > - unsigned long mmu_invalidate_range_start; > - unsigned long mmu_invalidate_range_end; > + gfn_t mmu_invalidate_range_start; > + gfn_t mmu_invalidate_range_end; > #endif > struct list_head devices; > u64 manual_dirty_log_protect; > @@ -1392,10 +1392,9 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc); > void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); > #endif > > -void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, > - unsigned long end); > -void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, > - unsigned long end); > +void kvm_mmu_invalidate_begin(struct kvm *kvm); > +void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end); > +void kvm_mmu_invalidate_end(struct kvm *kvm); > > long kvm_arch_dev_ioctl(struct file *filp, > unsigned int ioctl, unsigned long arg); > @@ -1970,9 +1969,9 @@ static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) > return 0; > } > > -static inline int mmu_invalidate_retry_hva(struct kvm *kvm, > +static inline int mmu_invalidate_retry_gfn(struct kvm *kvm, > unsigned long mmu_seq, > - unsigned long hva) > + gfn_t gfn) > { > lockdep_assert_held(&kvm->mmu_lock); > /* > @@ -1981,10 +1980,20 @@ static inline int mmu_invalidate_retry_hva(struct kvm *kvm, > * that might be being invalidated. Note that it may include some false > * positives, due to shortcuts when handing concurrent invalidations. > */ > - if (unlikely(kvm->mmu_invalidate_in_progress) && > - hva >= kvm->mmu_invalidate_range_start && > - hva < kvm->mmu_invalidate_range_end) > - return 1; > + if (unlikely(kvm->mmu_invalidate_in_progress)) { > + /* > + * Dropping mmu_lock after bumping mmu_invalidate_in_progress > + * but before updating the range is a KVM bug. 
> + */ > + if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA || > + kvm->mmu_invalidate_range_end == INVALID_GPA)) > + return 1; > + > + if (gfn >= kvm->mmu_invalidate_range_start && > + gfn < kvm->mmu_invalidate_range_end) > + return 1; > + } > + > if (kvm->mmu_invalidate_seq != mmu_seq) > return 1; > return 0; > diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c > index 0524933856d4..4fad3b01dc1f 100644 > --- a/virt/kvm/kvm_main.c > +++ b/virt/kvm/kvm_main.c > @@ -543,9 +543,7 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) > > typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range); > > -typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start, > - unsigned long end); > - > +typedef void (*on_lock_fn_t)(struct kvm *kvm); > typedef void (*on_unlock_fn_t)(struct kvm *kvm); > > struct kvm_mmu_notifier_range { > @@ -637,7 +635,8 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, > locked = true; > KVM_MMU_LOCK(kvm); > if (!IS_KVM_NULL_FN(range->on_lock)) > - range->on_lock(kvm, range->start, range->end); > + range->on_lock(kvm); > + > if (IS_KVM_NULL_FN(range->handler)) > break; > } > @@ -742,15 +741,26 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, > kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn); > } > > -void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, > - unsigned long end) > +void kvm_mmu_invalidate_begin(struct kvm *kvm) > { > + lockdep_assert_held_write(&kvm->mmu_lock); > /* > * The count increase must become visible at unlock time as no > * spte can be established without taking the mmu_lock and > * count is also read inside the mmu_lock critical section. > */ > kvm->mmu_invalidate_in_progress++; > + > + if (likely(kvm->mmu_invalidate_in_progress == 1)) > + kvm->mmu_invalidate_range_start = INVALID_GPA; > +} > + > +void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end) > +{ > + lockdep_assert_held_write(&kvm->mmu_lock); > + > + WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress); > + > if (likely(kvm->mmu_invalidate_in_progress == 1)) { > kvm->mmu_invalidate_range_start = start; > kvm->mmu_invalidate_range_end = end; IIUC, Now we only add or override a part of the invalidate range in these fields, IOW only the range in last slot is stored when we unlock. That may break mmu_invalidate_retry_gfn() cause it can never know the whole invalidate range. 
How about we extend the mmu_invalidate_range_start/end everytime so that it records the whole invalidate range: if (kvm->mmu_invalidate_range_start == INVALID_GPA) { kvm->mmu_invalidate_range_start = start; kvm->mmu_invalidate_range_end = end; } else { kvm->mmu_invalidate_range_start = min(kvm->mmu_invalidate_range_start, start); kvm->mmu_invalidate_range_end = max(kvm->mmu_invalidate_range_end, end); } Thanks, Yilun > @@ -771,6 +781,12 @@ void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, > } > } > > +static bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) > +{ > + kvm_mmu_invalidate_range_add(kvm, range->start, range->end); > + return kvm_unmap_gfn_range(kvm, range); > +} > + > static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, > const struct mmu_notifier_range *range) > { > @@ -778,7 +794,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, > const struct kvm_mmu_notifier_range hva_range = { > .start = range->start, > .end = range->end, > - .handler = kvm_unmap_gfn_range, > + .handler = kvm_mmu_unmap_gfn_range, > .on_lock = kvm_mmu_invalidate_begin, > .on_unlock = kvm_arch_guest_memory_reclaimed, > .flush_on_ret = true, > @@ -817,8 +833,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, > return 0; > } > > -void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, > - unsigned long end) > +void kvm_mmu_invalidate_end(struct kvm *kvm) > { > /* > * This sequence increase will notify the kvm page fault that > @@ -833,6 +848,13 @@ void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, > * in conjunction with the smp_rmb in mmu_invalidate_retry(). > */ > kvm->mmu_invalidate_in_progress--; > + > + /* > + * Assert that at least one range must be added between start() and > + * end(). Not adding a range isn't fatal, but it is a KVM bug. > + */ > + WARN_ON_ONCE(kvm->mmu_invalidate_in_progress && > + kvm->mmu_invalidate_range_start == INVALID_GPA); > } > > static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, > -- > 2.42.0.283.g2d96d420d3-goog >
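Folded into the helper from the patch, Yilun's suggestion would look roughly like this. This is a sketch of the proposed change, not the fixup that was actually posted; the comment is paraphrased from the discussion:

```c
void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
{
	lockdep_assert_held_write(&kvm->mmu_lock);

	WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);

	if (kvm->mmu_invalidate_range_start == INVALID_GPA) {
		kvm->mmu_invalidate_range_start = start;
		kvm->mmu_invalidate_range_end = end;
	} else {
		/*
		 * Fully tracking multiple concurrent ranges has diminishing
		 * returns, so just keep the minimal range that covers every
		 * range added so far, accepting false positives in
		 * mmu_invalidate_retry_gfn().
		 */
		kvm->mmu_invalidate_range_start =
			min(kvm->mmu_invalidate_range_start, start);
		kvm->mmu_invalidate_range_end =
			max(kvm->mmu_invalidate_range_end, end);
	}
}
```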
On Wed, Sep 20, 2023, Xu Yilun wrote: > On 2023-09-13 at 18:55:00 -0700, Sean Christopherson wrote: > > +void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end) > > +{ > > + lockdep_assert_held_write(&kvm->mmu_lock); > > + > > + WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress); > > + > > if (likely(kvm->mmu_invalidate_in_progress == 1)) { > > kvm->mmu_invalidate_range_start = start; > > kvm->mmu_invalidate_range_end = end; > > IIUC, Now we only add or override a part of the invalidate range in > these fields, IOW only the range in last slot is stored when we unlock. Ouch. Good catch! > That may break mmu_invalidate_retry_gfn() cause it can never know the > whole invalidate range. > > How about we extend the mmu_invalidate_range_start/end everytime so that > it records the whole invalidate range: > > if (kvm->mmu_invalidate_range_start == INVALID_GPA) { > kvm->mmu_invalidate_range_start = start; > kvm->mmu_invalidate_range_end = end; > } else { > kvm->mmu_invalidate_range_start = > min(kvm->mmu_invalidate_range_start, start); > kvm->mmu_invalidate_range_end = > max(kvm->mmu_invalidate_range_end, end); > } Yeah, that does seem to be the easiest solution. I'll post a fixup patch, unless you want the honors.
On 2023-09-20 at 06:55:05 -0700, Sean Christopherson wrote:
> On Wed, Sep 20, 2023, Xu Yilun wrote:
> > On 2023-09-13 at 18:55:00 -0700, Sean Christopherson wrote:
> > > +void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end)
> > > +{
> > > + lockdep_assert_held_write(&kvm->mmu_lock);
> > > +
> > > + WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress);
> > > +
> > > if (likely(kvm->mmu_invalidate_in_progress == 1)) {
> > > kvm->mmu_invalidate_range_start = start;
> > > kvm->mmu_invalidate_range_end = end;
> >
> > IIUC, now we only add or override a part of the invalidate range in
> > these fields, IOW only the range in the last slot is stored when we unlock.
>
> Ouch. Good catch!
>
> > That may break mmu_invalidate_retry_gfn() because it can never know the
> > whole invalidate range.
> >
> > How about we extend mmu_invalidate_range_start/end every time so that
> > it records the whole invalidate range:
> >
> > if (kvm->mmu_invalidate_range_start == INVALID_GPA) {
> > kvm->mmu_invalidate_range_start = start;
> > kvm->mmu_invalidate_range_end = end;
> > } else {
> > kvm->mmu_invalidate_range_start =
> > min(kvm->mmu_invalidate_range_start, start);
> > kvm->mmu_invalidate_range_end =
> > max(kvm->mmu_invalidate_range_end, end);
> > }
>
> Yeah, that does seem to be the easiest solution.
>
> I'll post a fixup patch, unless you want the honors.

Please go ahead, because on second thought I'm wondering whether this simple range extension is reasonable.

When the invalidation crosses multiple slots, I'm not sure the contiguous HVA range must correspond to a contiguous GFN range. If not, are we producing a larger range than required?

And when the invalidation crosses multiple address spaces, I'm almost sure it is wrong to merge GFN ranges from different address spaces.

But I have no clear solution yet.

Thanks,
Yilun
On Thu, Sep 21, 2023, Xu Yilun wrote:
> When the invalidation crosses multiple slots, I'm not sure the
> contiguous HVA range must correspond to a contiguous GFN range. If not,
> are we producing a larger range than required?

Multiple invalidations are all but guaranteed to yield a range that covers addresses that aren't actually being invalidated. This is true today.

> And when the invalidation crosses multiple address spaces, I'm almost
> sure it is wrong to merge GFN ranges from different address spaces.

It's not "wrong" in the sense that false positives do not cause functional problems; at worst, a false positive can unnecessarily stall a vCPU until the unrelated invalidations complete.

Multiple concurrent invalidations are not common, and if they do happen, they are likely related and will have spatial locality in both host virtual address space and guest physical address space. Given that, we opted for the simple (and fast!) approach of maintaining a single all-encompassing range.
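As a concrete illustration of that trade-off, with made-up GFN values (these numbers and the call sequence are not from the thread, only the merge behavior is):

```c
/* Two unrelated invalidations that overlap in time, merged per the min/max scheme. */
kvm_mmu_invalidate_range_add(kvm, 0x1000, 0x1200);	/* slot A */
kvm_mmu_invalidate_range_add(kvm, 0x80000, 0x80400);	/* slot B */

/*
 * The tracked range is now [0x1000, 0x80400). A page fault on, say,
 * gfn 0x40000 is not touched by either invalidation, yet
 * mmu_invalidate_retry_gfn() still returns 1 for it while the
 * invalidations are in progress -- a harmless false positive that only
 * delays the fault until kvm_mmu_invalidate_end() runs.
 */
```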
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c index e1d011c67cc6..0f0231d2b74f 100644 --- a/arch/x86/kvm/mmu/mmu.c +++ b/arch/x86/kvm/mmu/mmu.c @@ -3056,7 +3056,7 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep) * * There are several ways to safely use this helper: * - * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before + * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before * consuming it. In this case, mmu_lock doesn't need to be held during the * lookup, but it does need to be held while checking the MMU notifier. * @@ -4358,7 +4358,7 @@ static bool is_page_fault_stale(struct kvm_vcpu *vcpu, return true; return fault->slot && - mmu_invalidate_retry_hva(vcpu->kvm, fault->mmu_seq, fault->hva); + mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn); } static int direct_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault) @@ -6253,7 +6253,9 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) write_lock(&kvm->mmu_lock); - kvm_mmu_invalidate_begin(kvm, 0, -1ul); + kvm_mmu_invalidate_begin(kvm); + + kvm_mmu_invalidate_range_add(kvm, gfn_start, gfn_end); flush = kvm_rmap_zap_gfn_range(kvm, gfn_start, gfn_end); @@ -6266,7 +6268,7 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end) if (flush) kvm_flush_remote_tlbs_range(kvm, gfn_start, gfn_end - gfn_start); - kvm_mmu_invalidate_end(kvm, 0, -1ul); + kvm_mmu_invalidate_end(kvm); write_unlock(&kvm->mmu_lock); } diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 72e3943f3693..6e502ba93141 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6757,10 +6757,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu) return; /* - * Grab the memslot so that the hva lookup for the mmu_notifier retry - * is guaranteed to use the same memslot as the pfn lookup, i.e. rely - * on the pfn lookup's validation of the memslot to ensure a valid hva - * is used for the retry check. + * Explicitly grab the memslot using KVM's internal slot ID to ensure + * KVM doesn't unintentionally grab a userspace memslot. It _should_ + * be impossible for userspace to create a memslot for the APIC when + * APICv is enabled, but paranoia won't hurt in this case. 
*/ slot = id_to_memslot(slots, APIC_ACCESS_PAGE_PRIVATE_MEMSLOT); if (!slot || slot->flags & KVM_MEMSLOT_INVALID) @@ -6785,8 +6785,7 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu) return; read_lock(&vcpu->kvm->mmu_lock); - if (mmu_invalidate_retry_hva(kvm, mmu_seq, - gfn_to_hva_memslot(slot, gfn))) { + if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) { kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu); read_unlock(&vcpu->kvm->mmu_lock); goto out; diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index fb6c6109fdca..11d091688346 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -787,8 +787,8 @@ struct kvm { struct mmu_notifier mmu_notifier; unsigned long mmu_invalidate_seq; long mmu_invalidate_in_progress; - unsigned long mmu_invalidate_range_start; - unsigned long mmu_invalidate_range_end; + gfn_t mmu_invalidate_range_start; + gfn_t mmu_invalidate_range_end; #endif struct list_head devices; u64 manual_dirty_log_protect; @@ -1392,10 +1392,9 @@ void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc); void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc); #endif -void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, - unsigned long end); -void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, - unsigned long end); +void kvm_mmu_invalidate_begin(struct kvm *kvm); +void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end); +void kvm_mmu_invalidate_end(struct kvm *kvm); long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg); @@ -1970,9 +1969,9 @@ static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq) return 0; } -static inline int mmu_invalidate_retry_hva(struct kvm *kvm, +static inline int mmu_invalidate_retry_gfn(struct kvm *kvm, unsigned long mmu_seq, - unsigned long hva) + gfn_t gfn) { lockdep_assert_held(&kvm->mmu_lock); /* @@ -1981,10 +1980,20 @@ static inline int mmu_invalidate_retry_hva(struct kvm *kvm, * that might be being invalidated. Note that it may include some false * positives, due to shortcuts when handing concurrent invalidations. */ - if (unlikely(kvm->mmu_invalidate_in_progress) && - hva >= kvm->mmu_invalidate_range_start && - hva < kvm->mmu_invalidate_range_end) - return 1; + if (unlikely(kvm->mmu_invalidate_in_progress)) { + /* + * Dropping mmu_lock after bumping mmu_invalidate_in_progress + * but before updating the range is a KVM bug. 
+ */ + if (WARN_ON_ONCE(kvm->mmu_invalidate_range_start == INVALID_GPA || + kvm->mmu_invalidate_range_end == INVALID_GPA)) + return 1; + + if (gfn >= kvm->mmu_invalidate_range_start && + gfn < kvm->mmu_invalidate_range_end) + return 1; + } + if (kvm->mmu_invalidate_seq != mmu_seq) return 1; return 0; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 0524933856d4..4fad3b01dc1f 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -543,9 +543,7 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn) typedef bool (*gfn_handler_t)(struct kvm *kvm, struct kvm_gfn_range *range); -typedef void (*on_lock_fn_t)(struct kvm *kvm, unsigned long start, - unsigned long end); - +typedef void (*on_lock_fn_t)(struct kvm *kvm); typedef void (*on_unlock_fn_t)(struct kvm *kvm); struct kvm_mmu_notifier_range { @@ -637,7 +635,8 @@ static __always_inline int __kvm_handle_hva_range(struct kvm *kvm, locked = true; KVM_MMU_LOCK(kvm); if (!IS_KVM_NULL_FN(range->on_lock)) - range->on_lock(kvm, range->start, range->end); + range->on_lock(kvm); + if (IS_KVM_NULL_FN(range->handler)) break; } @@ -742,15 +741,26 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn, kvm_handle_hva_range(mn, address, address + 1, arg, kvm_change_spte_gfn); } -void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, - unsigned long end) +void kvm_mmu_invalidate_begin(struct kvm *kvm) { + lockdep_assert_held_write(&kvm->mmu_lock); /* * The count increase must become visible at unlock time as no * spte can be established without taking the mmu_lock and * count is also read inside the mmu_lock critical section. */ kvm->mmu_invalidate_in_progress++; + + if (likely(kvm->mmu_invalidate_in_progress == 1)) + kvm->mmu_invalidate_range_start = INVALID_GPA; +} + +void kvm_mmu_invalidate_range_add(struct kvm *kvm, gfn_t start, gfn_t end) +{ + lockdep_assert_held_write(&kvm->mmu_lock); + + WARN_ON_ONCE(!kvm->mmu_invalidate_in_progress); + if (likely(kvm->mmu_invalidate_in_progress == 1)) { kvm->mmu_invalidate_range_start = start; kvm->mmu_invalidate_range_end = end; @@ -771,6 +781,12 @@ void kvm_mmu_invalidate_begin(struct kvm *kvm, unsigned long start, } } +static bool kvm_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range) +{ + kvm_mmu_invalidate_range_add(kvm, range->start, range->end); + return kvm_unmap_gfn_range(kvm, range); +} + static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, const struct mmu_notifier_range *range) { @@ -778,7 +794,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, const struct kvm_mmu_notifier_range hva_range = { .start = range->start, .end = range->end, - .handler = kvm_unmap_gfn_range, + .handler = kvm_mmu_unmap_gfn_range, .on_lock = kvm_mmu_invalidate_begin, .on_unlock = kvm_arch_guest_memory_reclaimed, .flush_on_ret = true, @@ -817,8 +833,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, return 0; } -void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, - unsigned long end) +void kvm_mmu_invalidate_end(struct kvm *kvm) { /* * This sequence increase will notify the kvm page fault that @@ -833,6 +848,13 @@ void kvm_mmu_invalidate_end(struct kvm *kvm, unsigned long start, * in conjunction with the smp_rmb in mmu_invalidate_retry(). */ kvm->mmu_invalidate_in_progress--; + + /* + * Assert that at least one range must be added between start() and + * end(). Not adding a range isn't fatal, but it is a KVM bug. 
+ */ + WARN_ON_ONCE(kvm->mmu_invalidate_in_progress && + kvm->mmu_invalidate_range_start == INVALID_GPA); } static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
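For reference, fault handlers are expected to consume the retry helper roughly as follows. This is a simplified sketch of the pattern described in the direct_pte_prefetch() comment and used by is_page_fault_stale() above; the function name and control flow here are illustrative, not the exact KVM code:

```c
/* Illustrative only: shows the seq-snapshot / lookup / retry-check ordering. */
static int example_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
{
retry:
	/* Snapshot the sequence count before resolving the pfn ... */
	fault->mmu_seq = vcpu->kvm->mmu_invalidate_seq;
	smp_rmb();	/* ... pairs with the smp_wmb() in kvm_mmu_invalidate_end() */

	/* Resolve fault->pfn for fault->gfn without holding mmu_lock. */

	write_lock(&vcpu->kvm->mmu_lock);
	if (fault->slot &&
	    mmu_invalidate_retry_gfn(vcpu->kvm, fault->mmu_seq, fault->gfn)) {
		/* An invalidation raced with the lookup; start over. */
		write_unlock(&vcpu->kvm->mmu_lock);
		goto retry;
	}

	/* Safe to install the mapping for fault->gfn here. */
	write_unlock(&vcpu->kvm->mmu_lock);
	return 0;
}
```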