Message ID | 20231121180223.12484-9-paul@xen.org |
---|---|
State | New, archived |
Series | KVM: xen: update shared_info and vcpu_info handling |
On Tue, 2023-11-21 at 18:02 +0000, Paul Durrant wrote:
> 
> -static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
> +static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, u64 addr, bool addr_is_gpa,
>                               unsigned long len)
>  {
>         struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
> -       unsigned long page_offset = offset_in_page(gpa);
> +       unsigned long page_offset = offset_in_page(addr);
>         bool unmap_old = false;
>         kvm_pfn_t old_pfn;
>         bool hva_change = false;
> @@ -244,12 +244,21 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
>         old_pfn = gpc->pfn;
>         old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
>  
> -       /* If the userspace HVA is invalid, refresh that first */
> -       if (gpc->gpa != gpa || gpc->generation != slots->generation ||
> -           kvm_is_error_hva(gpc->uhva)) {
> -               gfn_t gfn = gpa_to_gfn(gpa);
> +       if (!addr_is_gpa) {
> +               gpc->gpa = KVM_XEN_INVALID_GPA;
> +               gpc->uhva = PAGE_ALIGN_DOWN(gpc->uhva);
> +               addr = PAGE_ALIGN_DOWN(addr);
> +
> +               if (gpc->uhva != addr) {
> +                       gpc->uhva = addr;
> +                       hva_change = true;
> +               }
> +       } else if (gpc->gpa != addr ||
> +                  gpc->generation != slots->generation ||
> +                  kvm_is_error_hva(gpc->uhva)) {
> +               gfn_t gfn = gpa_to_gfn(addr);
>  
> -               gpc->gpa = gpa;
> +               gpc->gpa = addr;
>                 gpc->generation = slots->generation;
>                 gpc->memslot = __gfn_to_memslot(slots, gfn);
>                 gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);

Hrm, now that a previous patch means we're preserving the low bits of
gpc->uhva surely you don't *need* to mess with the gpc struct?

If gpc->gpa == KVM_XEN_INVALID_GPA (but gpc->uhva != KVM_HVA_ERR_BAD &&
gpc->active) surely that's enough to signal that gpc->uhva is canonical
and doesn't need to be looked up from the GPA?

And I think that means the 'bool addr_is_gpa' argument can go away from
__kvm_gpc_refresh(); you can set it up in {__,}kvm_gpc_activate*()
instead?
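[Editorial note: the suggestion above amounts to a predicate on the cache state rather than a flag. A hypothetical helper such as the one below (not part of the patch or the thread) captures the idea, assuming the HVA-based activate path sets gpc->gpa to KVM_XEN_INVALID_GPA and stores a valid gpc->uhva:]

        /* Hypothetical sketch: infer from cache state whether gpc->uhva is
         * already canonical, instead of passing an addr_is_gpa argument to
         * __kvm_gpc_refresh(). */
        static bool kvm_gpc_uhva_is_canonical(struct gfn_to_pfn_cache *gpc)
        {
                return gpc->active &&
                       gpc->gpa == KVM_XEN_INVALID_GPA &&
                       !kvm_is_error_hva(gpc->uhva);
        }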
On 21/11/2023 22:47, David Woodhouse wrote:
> On Tue, 2023-11-21 at 18:02 +0000, Paul Durrant wrote:
>>
>> -static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
>> +static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, u64 addr, bool addr_is_gpa,
>>                               unsigned long len)
>>  {
>>         struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
>> -       unsigned long page_offset = offset_in_page(gpa);
>> +       unsigned long page_offset = offset_in_page(addr);
>>         bool unmap_old = false;
>>         kvm_pfn_t old_pfn;
>>         bool hva_change = false;
>> @@ -244,12 +244,21 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
>>         old_pfn = gpc->pfn;
>>         old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
>>  
>> -       /* If the userspace HVA is invalid, refresh that first */
>> -       if (gpc->gpa != gpa || gpc->generation != slots->generation ||
>> -           kvm_is_error_hva(gpc->uhva)) {
>> -               gfn_t gfn = gpa_to_gfn(gpa);
>> +       if (!addr_is_gpa) {
>> +               gpc->gpa = KVM_XEN_INVALID_GPA;
>> +               gpc->uhva = PAGE_ALIGN_DOWN(gpc->uhva);
>> +               addr = PAGE_ALIGN_DOWN(addr);
>> +
>> +               if (gpc->uhva != addr) {
>> +                       gpc->uhva = addr;
>> +                       hva_change = true;
>> +               }
>> +       } else if (gpc->gpa != addr ||
>> +                  gpc->generation != slots->generation ||
>> +                  kvm_is_error_hva(gpc->uhva)) {
>> +               gfn_t gfn = gpa_to_gfn(addr);
>>  
>> -               gpc->gpa = gpa;
>> +               gpc->gpa = addr;
>>                 gpc->generation = slots->generation;
>>                 gpc->memslot = __gfn_to_memslot(slots, gfn);
>>                 gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
> 
> Hrm, now that a previous patch means we're preserving the low bits of
> gpc->uhva surely you don't *need* to mess with the gpc struct?
> 

I'm not messing with it, am I?

> If gpc->gpa == KVM_XEN_INVALID_GPA (but gpc->uhva != KVM_HVA_ERR_BAD &&
> gpc->active) surely that's enough to signal that gpc->uhva is canonical
> and doesn't need to be looked up from the GPA?
> 
> And I think that means the 'bool addr_is_gpa' argument can go away from
> __kvm_gpc_refresh(); you can set it up in {__,}kvm_gpc_activate*()
> instead?

Alas not... __kvm_gpc_refresh() still needs to know *something* has
changed, otherwise the khva will be stale.

  Paul
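[Editorial note: an illustrative spelling-out of that concern (not from the thread). If the activate path stored the new address into the cache before calling refresh, refresh would have nothing left to compare against and could not tell that a remap is needed:]

        /*
         * Hypothetical illustration: suppose the activate path did
         *
         *         gpc->uhva = new_hva;
         *         __kvm_gpc_refresh(gpc, len);
         *
         * Inside refresh, the old uhva has already been overwritten, so a
         * check along the lines of the patch's
         *
         *         if (gpc->uhva != addr)
         *                 hva_change = true;
         *
         * can never trigger; hva_change stays false and gpc->khva keeps
         * mapping the old page.
         */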
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index b1dc2e5a64f3..484c587e8290 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1312,6 +1312,22 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm);
  */
 int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len);
 
+/**
+ * kvm_gpc_activate_hva - prepare a cached kernel mapping and HPA for a given HVA.
+ *
+ * @gpc:           struct gfn_to_pfn_cache object.
+ * @hva:           userspace virtual address to map.
+ * @len:           sanity check; the range being accessed must fit a single page.
+ *
+ * @return:        0 for success.
+ *                 -EINVAL for a mapping which would cross a page boundary.
+ *                 -EFAULT for an untranslatable guest physical address.
+ *
+ * The semantics of this function are the same as those of kvm_gpc_activate(). It
+ * merely bypasses a layer of address translation.
+ */
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len);
+
 /**
  * kvm_gpc_check - check validity of a gfn_to_pfn_cache.
  *
@@ -1365,7 +1381,8 @@ void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc);
  */
 static inline void kvm_gpc_mark_dirty(struct gfn_to_pfn_cache *gpc)
 {
-	mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
+	if (gpc->gpa != KVM_XEN_INVALID_GPA)
+		mark_page_dirty_in_slot(gpc->kvm, gpc->memslot, gpc->gpa >> PAGE_SHIFT);
 }
 
 void kvm_sigset_activate(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index c545f6246501..ed700afeec49 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -209,11 +209,11 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
 	return -EFAULT;
 }
 
-static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
+static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, u64 addr, bool addr_is_gpa,
 			     unsigned long len)
 {
 	struct kvm_memslots *slots = kvm_memslots(gpc->kvm);
-	unsigned long page_offset = offset_in_page(gpa);
+	unsigned long page_offset = offset_in_page(addr);
 	bool unmap_old = false;
 	kvm_pfn_t old_pfn;
 	bool hva_change = false;
@@ -244,12 +244,21 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 	old_pfn = gpc->pfn;
 	old_khva = (void *)PAGE_ALIGN_DOWN((uintptr_t)gpc->khva);
 
-	/* If the userspace HVA is invalid, refresh that first */
-	if (gpc->gpa != gpa || gpc->generation != slots->generation ||
-	    kvm_is_error_hva(gpc->uhva)) {
-		gfn_t gfn = gpa_to_gfn(gpa);
+	if (!addr_is_gpa) {
+		gpc->gpa = KVM_XEN_INVALID_GPA;
+		gpc->uhva = PAGE_ALIGN_DOWN(gpc->uhva);
+		addr = PAGE_ALIGN_DOWN(addr);
+
+		if (gpc->uhva != addr) {
+			gpc->uhva = addr;
+			hva_change = true;
+		}
+	} else if (gpc->gpa != addr ||
+		   gpc->generation != slots->generation ||
+		   kvm_is_error_hva(gpc->uhva)) {
+		gfn_t gfn = gpa_to_gfn(addr);
 
-		gpc->gpa = gpa;
+		gpc->gpa = addr;
 		gpc->generation = slots->generation;
 		gpc->memslot = __gfn_to_memslot(slots, gfn);
 		gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
@@ -317,7 +326,10 @@ static int __kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, gpa_t gpa,
 
 int kvm_gpc_refresh(struct gfn_to_pfn_cache *gpc, unsigned long len)
 {
-	return __kvm_gpc_refresh(gpc, gpc->gpa, len);
+	if (gpc->gpa != KVM_XEN_INVALID_GPA)
+		return __kvm_gpc_refresh(gpc, gpc->gpa, true, len);
+
+	return __kvm_gpc_refresh(gpc, gpc->uhva, false, len);
 }
 
 void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
@@ -330,7 +342,8 @@ void kvm_gpc_init(struct gfn_to_pfn_cache *gpc, struct kvm *kvm)
 	gpc->uhva = KVM_HVA_ERR_BAD;
 }
 
-int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+static int __kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, u64 addr, bool addr_is_gpa,
+			      unsigned long len)
 {
 	struct kvm *kvm = gpc->kvm;
 
@@ -351,7 +364,17 @@ int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
 		gpc->active = true;
 		write_unlock_irq(&gpc->lock);
 	}
-	return __kvm_gpc_refresh(gpc, gpa, len);
+	return __kvm_gpc_refresh(gpc, addr, addr_is_gpa, len);
+}
+
+int kvm_gpc_activate(struct gfn_to_pfn_cache *gpc, gpa_t gpa, unsigned long len)
+{
+	return __kvm_gpc_activate(gpc, gpa, true, len);
+}
+
+int kvm_gpc_activate_hva(struct gfn_to_pfn_cache *gpc, unsigned long hva, unsigned long len)
+{
+	return __kvm_gpc_activate(gpc, hva, false, len);
 }
 
 void kvm_gpc_deactivate(struct gfn_to_pfn_cache *gpc)
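[Editorial note: for reference, a caller of the new helper would presumably look something like the fragment below. This is a hypothetical usage sketch, not part of this patch; use_hva, hva, gpa and gpc are illustrative locals, and sizeof(struct vcpu_info) is only an example length.]

        /* Hypothetical caller: activate the pfn cache either from a guest
         * physical address or directly from a userspace virtual address. */
        int r;

        if (use_hva)
                r = kvm_gpc_activate_hva(gpc, hva, sizeof(struct vcpu_info));
        else
                r = kvm_gpc_activate(gpc, gpa, sizeof(struct vcpu_info));
        if (r)
                return r;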