Message ID | 20190821153656.33429-5-steven.price@arm.com
---|---
State | New, archived
Series | arm64: Stolen time support
On Wed, 21 Aug 2019 16:36:50 +0100
Steven Price <steven.price@arm.com> wrote:

> kvm_put_guest() is analogous to put_user() - it writes a single value to
> the guest physical address. The implementation is built upon put_user()
> and so it has the same single copy atomic properties.
>
> Signed-off-by: Steven Price <steven.price@arm.com>
> ---
>  include/linux/kvm_host.h | 24 ++++++++++++++++++++++++
>  1 file changed, 24 insertions(+)
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index fcb46b3374c6..e154a1897e20 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -746,6 +746,30 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>                                   unsigned long len);
>  int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>                               gpa_t gpa, unsigned long len);
> +
> +#define __kvm_put_guest(kvm, gfn, offset, value, type) \
> +({ \
> +	unsigned long __addr = gfn_to_hva(kvm, gfn); \
> +	type __user *__uaddr = (type __user *)(__addr + offset); \
> +	int __ret = 0; \

Why initialize __ret?

> + \
> +	if (kvm_is_error_hva(__addr)) \
> +		__ret = -EFAULT; \
> +	else \
> +		__ret = put_user(value, __uaddr); \
> +	if (!__ret) \
> +		mark_page_dirty(kvm, gfn); \
> +	__ret; \
> +})
> +
> +#define kvm_put_guest(kvm, gpa, value, type) \
> +({ \
> +	gpa_t __gpa = gpa; \
> +	struct kvm *__kvm = kvm; \
> +	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
> +			offset_in_page(__gpa), (value), type); \
> +})
> +
>  int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
>  int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
>  struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
On 22/08/2019 11:29, Jonathan Cameron wrote:
> On Wed, 21 Aug 2019 16:36:50 +0100
> Steven Price <steven.price@arm.com> wrote:
>
>> kvm_put_guest() is analogous to put_user() - it writes a single value to
>> the guest physical address. The implementation is built upon put_user()
>> and so it has the same single copy atomic properties.
>>
>> Signed-off-by: Steven Price <steven.price@arm.com>
>> ---
>>  include/linux/kvm_host.h | 24 ++++++++++++++++++++++++
>>  1 file changed, 24 insertions(+)
>>
>> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
>> index fcb46b3374c6..e154a1897e20 100644
>> --- a/include/linux/kvm_host.h
>> +++ b/include/linux/kvm_host.h
>> @@ -746,6 +746,30 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>>                                   unsigned long len);
>>  int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>>                               gpa_t gpa, unsigned long len);
>> +
>> +#define __kvm_put_guest(kvm, gfn, offset, value, type) \
>> +({ \
>> +	unsigned long __addr = gfn_to_hva(kvm, gfn); \
>> +	type __user *__uaddr = (type __user *)(__addr + offset); \
>> +	int __ret = 0; \
>
> Why initialize __ret?

Good question. Actually, looking at this again, if I reorder this to be
pessimistic I can make it shorter:

	int __ret = -EFAULT;

	if (!kvm_is_error_hva(__addr))
		__ret = put_user(value, __uaddr);
	if (!__ret)
		mark_page_dirty(kvm, gfn);
	__ret;

Thanks for taking a look.

Steve

>> + \
>> +	if (kvm_is_error_hva(__addr)) \
>> +		__ret = -EFAULT; \
>> +	else \
>> +		__ret = put_user(value, __uaddr); \
>> +	if (!__ret) \
>> +		mark_page_dirty(kvm, gfn); \
>> +	__ret; \
>> +})
>> +
>> +#define kvm_put_guest(kvm, gpa, value, type) \
>> +({ \
>> +	gpa_t __gpa = gpa; \
>> +	struct kvm *__kvm = kvm; \
>> +	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
>> +			offset_in_page(__gpa), (value), type); \
>> +})
>> +
>>  int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
>>  int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
>>  struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
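For reference, folding the reordering Steve describes into the posted macro gives something like the sketch below. This is only an illustration of the suggested change, not a later revision of the patch:

	#define __kvm_put_guest(kvm, gfn, offset, value, type) \
	({ \
		unsigned long __addr = gfn_to_hva(kvm, gfn); \
		type __user *__uaddr = (type __user *)(__addr + offset); \
		/* Pessimistic default: anything other than a successful put_user() fails. */ \
		int __ret = -EFAULT; \
	 \
		if (!kvm_is_error_hva(__addr)) \
			__ret = put_user(value, __uaddr); \
		if (!__ret) \
			mark_page_dirty(kvm, gfn); \
		__ret; \
	})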
On Wed, Aug 21, 2019 at 04:36:50PM +0100, Steven Price wrote:
> kvm_put_guest() is analogous to put_user() - it writes a single value to
> the guest physical address. The implementation is built upon put_user()
> and so it has the same single copy atomic properties.

What do you mean by "single copy atomic"?  I.e. what guarantees does
put_user() provide that __copy_to_user() does not?

>
> Signed-off-by: Steven Price <steven.price@arm.com>
> ---
>  include/linux/kvm_host.h | 24 ++++++++++++++++++++++++
>  1 file changed, 24 insertions(+)
>
> diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
> index fcb46b3374c6..e154a1897e20 100644
> --- a/include/linux/kvm_host.h
> +++ b/include/linux/kvm_host.h
> @@ -746,6 +746,30 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>                                   unsigned long len);
>  int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
>                               gpa_t gpa, unsigned long len);
> +
> +#define __kvm_put_guest(kvm, gfn, offset, value, type) \
> +({ \
> +	unsigned long __addr = gfn_to_hva(kvm, gfn); \
> +	type __user *__uaddr = (type __user *)(__addr + offset); \
> +	int __ret = 0; \
> + \
> +	if (kvm_is_error_hva(__addr)) \
> +		__ret = -EFAULT; \
> +	else \
> +		__ret = put_user(value, __uaddr); \
> +	if (!__ret) \
> +		mark_page_dirty(kvm, gfn); \
> +	__ret; \
> +})
> +
> +#define kvm_put_guest(kvm, gpa, value, type) \
> +({ \
> +	gpa_t __gpa = gpa; \
> +	struct kvm *__kvm = kvm; \
> +	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
> +			offset_in_page(__gpa), (value), type); \
> +})
> +
>  int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
>  int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
>  struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
> --
> 2.20.1
>
On 22/08/2019 16:28, Sean Christopherson wrote:
> On Wed, Aug 21, 2019 at 04:36:50PM +0100, Steven Price wrote:
>> kvm_put_guest() is analogous to put_user() - it writes a single value to
>> the guest physical address. The implementation is built upon put_user()
>> and so it has the same single copy atomic properties.
>
> What do you mean by "single copy atomic"?  I.e. what guarantees does
> put_user() provide that __copy_to_user() does not?

Single-copy atomicity is defined by the Arm architecture[1] and I'm not
going to try to go into the full details here, so this is a summary.

For the sake of this feature what we care about is that the value
written/read cannot be "torn". In other words, if there is a read (in
this case from another VCPU) racing with the write, then the read will
see either the old value or the new value; it cannot return a mixture.
(This is of course assuming that the read itself uses a single-copy
atomic safe method.)

__copy_to_user() is implemented as a memcpy() and as such cannot provide
single-copy atomicity in the general case (the buffer could easily be
bigger than the architecture can guarantee).

put_user(), on the other hand, is implemented (on arm64) as an explicit
store instruction and therefore is guaranteed by the architecture to be
single-copy atomic (i.e. another CPU cannot see a half-written value).

Steve

[1] https://static.docs.arm.com/ddi0487/ea/DDI0487E_a_armv8_arm.pdf#page=110
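To make the "no torn values" point concrete, a guest-side reader relying on this guarantee could look roughly like the sketch below. The structure and field names here are made up for illustration; the real shared record is defined elsewhere in the series:

	#include <linux/compiler.h>
	#include <linux/types.h>
	#include <asm/byteorder.h>

	/* Hypothetical layout of a field the host updates with put_user(). */
	struct stolen_time_record {
		__le64 steal;
	};

	static u64 read_steal(struct stolen_time_record *rec)
	{
		/*
		 * READ_ONCE() on a naturally aligned 64-bit field is a single
		 * load on arm64, so a racing single-copy atomic store from the
		 * host is observed as either the old value or the new value,
		 * never a mixture of the two.
		 */
		return le64_to_cpu(READ_ONCE(rec->steal));
	}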
On Thu, Aug 22, 2019 at 04:46:10PM +0100, Steven Price wrote:
> On 22/08/2019 16:28, Sean Christopherson wrote:
> > On Wed, Aug 21, 2019 at 04:36:50PM +0100, Steven Price wrote:
> >> kvm_put_guest() is analogous to put_user() - it writes a single value to
> >> the guest physical address. The implementation is built upon put_user()
> >> and so it has the same single copy atomic properties.
> >
> > What do you mean by "single copy atomic"?  I.e. what guarantees does
> > put_user() provide that __copy_to_user() does not?
>
> Single-copy atomicity is defined by the Arm architecture[1] and I'm not
> going to try to go into the full details here, so this is a summary.
>
> For the sake of this feature what we care about is that the value
> written/read cannot be "torn". In other words if there is a read (in
> this case from another VCPU) that is racing with the write then the read
> will either get the old value or the new value. It cannot return a
> mixture. (This is of course assuming that the read is using a
> single-copy atomic safe method).

Thanks for the explanation.  I assumed that's what you were referring to,
but wanted to double check.

> __copy_to_user() is implemented as a memcpy() and as such cannot provide
> single-copy atomicity in the general case (the buffer could easily be
> bigger than the architecture can guarantee).
>
> put_user() on the other hand is implemented (on arm64) as an explicit
> store instruction and therefore is guaranteed by the architecture to be
> single-copy atomic (i.e. another CPU cannot see a half-written value).

I don't think kvm_put_guest() belongs in generic code, at least not with
the current changelog explanation about it providing single-copy atomic
semantics.  AFAICT, the single-copy thing is very much an arm64
implementation detail, e.g. the vast majority of 32-bit architectures,
including x86, do not provide any guarantees, and x86-64 generates more
or less the same code for put_user() and __copy_to_user() for 8-byte and
smaller accesses.

As an alternative to kvm_put_guest() entirely, is it an option to change
arm64's raw_copy_to_user() to redirect to __put_user() for sizes that are
constant at compile time and can be handled by __put_user()?  That would
allow using kvm_write_guest() to update stolen time, albeit with
arguably an even bigger dependency on the uaccess implementation details.
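A rough sketch of what that alternative might look like. This is not proposed code: the real arm64 definition lives in arch/arm64/include/asm/uaccess.h, the sketch assumes it is placed there (so __put_user() and the existing copy helper are visible), and the fallback name __arch_copy_to_user() is simply the current memcpy-style path:

	static __always_inline unsigned long
	raw_copy_to_user(void __user *to, const void *from, unsigned long n)
	{
		/*
		 * Route compile-time-constant, register-sized copies through
		 * __put_user() so they become single stores (assumes the
		 * source is suitably aligned); everything else falls back to
		 * the existing memcpy-style path.  raw_copy_to_user() returns
		 * the number of bytes *not* copied.
		 */
		if (__builtin_constant_p(n)) {
			switch (n) {
			case 1:
				return __put_user(*(const u8 *)from,
						  (u8 __user *)to) ? n : 0;
			case 2:
				return __put_user(*(const u16 *)from,
						  (u16 __user *)to) ? n : 0;
			case 4:
				return __put_user(*(const u32 *)from,
						  (u32 __user *)to) ? n : 0;
			case 8:
				return __put_user(*(const u64 *)from,
						  (u64 __user *)to) ? n : 0;
			}
		}
		return __arch_copy_to_user(to, from, n);
	}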
On 22/08/2019 17:24, Sean Christopherson wrote:
> On Thu, Aug 22, 2019 at 04:46:10PM +0100, Steven Price wrote:
>> On 22/08/2019 16:28, Sean Christopherson wrote:
>>> On Wed, Aug 21, 2019 at 04:36:50PM +0100, Steven Price wrote:
>>>> kvm_put_guest() is analogous to put_user() - it writes a single value to
>>>> the guest physical address. The implementation is built upon put_user()
>>>> and so it has the same single copy atomic properties.
>>>
>>> What do you mean by "single copy atomic"?  I.e. what guarantees does
>>> put_user() provide that __copy_to_user() does not?
>>
>> Single-copy atomicity is defined by the Arm architecture[1] and I'm not
>> going to try to go into the full details here, so this is a summary.
>>
>> For the sake of this feature what we care about is that the value
>> written/read cannot be "torn". In other words if there is a read (in
>> this case from another VCPU) that is racing with the write then the read
>> will either get the old value or the new value. It cannot return a
>> mixture. (This is of course assuming that the read is using a
>> single-copy atomic safe method).
>
> Thanks for the explanation.  I assumed that's what you were referring to,
> but wanted to double check.
>
>> __copy_to_user() is implemented as a memcpy() and as such cannot provide
>> single-copy atomicity in the general case (the buffer could easily be
>> bigger than the architecture can guarantee).
>>
>> put_user() on the other hand is implemented (on arm64) as an explicit
>> store instruction and therefore is guaranteed by the architecture to be
>> single-copy atomic (i.e. another CPU cannot see a half-written value).
>
> I don't think kvm_put_guest() belongs in generic code, at least not with
> the current changelog explanation about it providing single-copy atomic
> semantics.  AFAICT, the single-copy thing is very much an arm64
> implementation detail, e.g. the vast majority of 32-bit architectures,
> including x86, do not provide any guarantees, and x86-64 generates more
> or less the same code for put_user() and __copy_to_user() for 8-byte and
> smaller accesses.
>
> As an alternative to kvm_put_guest() entirely, is it an option to change
> arm64's raw_copy_to_user() to redirect to __put_user() for sizes that are
> constant at compile time and can be handled by __put_user()?  That would
> allow using kvm_write_guest() to update stolen time, albeit with
> arguably an even bigger dependency on the uaccess implementation details.

I think it's important to make the requirement that this is a single write
visible in some way. copy_to_user() is effectively "setup();memcpy();finish();"
and while a good memcpy() implementation would be identical to put_user(),
there's a lot more room for this being broken in the future by changes to
the memcpy() implementation. (And I don't want to require that memcpy()
has to detect this case.)

One suggestion is to call it something like kvm_put_guest_atomic() to
reflect the atomicity requirement. Presumably that would be based on a new
put_user_atomic() which architectures could override as necessary if
put_user() doesn't provide the necessary guarantees.

Steve
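A minimal sketch of that naming suggestion, purely illustrative (no put_user_atomic() exists in the tree at this point):

	/*
	 * Generic fallback: architectures whose put_user() is already
	 * single-copy atomic for aligned, register-sized accesses need not
	 * override this; others would supply their own definition.
	 */
	#ifndef put_user_atomic
	#define put_user_atomic(value, ptr)	put_user(value, ptr)
	#endif

The __kvm_put_guest() body would then call put_user_atomic() instead of put_user(), and the wrapper could be named kvm_put_guest_atomic() so the atomicity requirement is explicit at the call sites.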
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index fcb46b3374c6..e154a1897e20 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -746,6 +746,30 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                                  unsigned long len);
 int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
                              gpa_t gpa, unsigned long len);
+
+#define __kvm_put_guest(kvm, gfn, offset, value, type) \
+({ \
+	unsigned long __addr = gfn_to_hva(kvm, gfn); \
+	type __user *__uaddr = (type __user *)(__addr + offset); \
+	int __ret = 0; \
+ \
+	if (kvm_is_error_hva(__addr)) \
+		__ret = -EFAULT; \
+	else \
+		__ret = put_user(value, __uaddr); \
+	if (!__ret) \
+		mark_page_dirty(kvm, gfn); \
+	__ret; \
+})
+
+#define kvm_put_guest(kvm, gpa, value, type) \
+({ \
+	gpa_t __gpa = gpa; \
+	struct kvm *__kvm = kvm; \
+	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
+			offset_in_page(__gpa), (value), type); \
+})
+
 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
kvm_put_guest() is analogous to put_user() - it writes a single value to
the guest physical address. The implementation is built upon put_user()
and so it has the same single copy atomic properties.

Signed-off-by: Steven Price <steven.price@arm.com>
---
 include/linux/kvm_host.h | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)
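An illustrative caller (not taken verbatim from this series) that updates a single 64-bit stolen-time value in guest memory might look like this; the helper name and the assumption that the value is written as a plain u64 are made up for the example:

	#include <linux/kvm_host.h>

	/* Hypothetical helper: write one naturally aligned u64 to guest memory. */
	static int update_stolen_time(struct kvm *kvm, gpa_t steal_gpa, u64 steal)
	{
		return kvm_put_guest(kvm, steal_gpa, steal, u64);
	}

Because kvm_put_guest() is built on put_user(), the write is a single store on arm64, so a concurrently reading vCPU sees either the old or the new value.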