diff mbox

[v5,5/9] x86/hvm: add vcpu parameter to guest memory copy function

Message ID 20170119172941.65642-6-roger.pau@citrix.com (mailing list archive)
State New, archived
Headers show

Commit Message

Roger Pau Monné Jan. 19, 2017, 5:29 p.m. UTC
Current __hvm_copy assumes that the destination memory belongs to the current
vcpu, but this is not always the case: during the PVHv2 Dom0 build, the hvm
copy functions are used while current is the idle vcpu. Add a new vcpu
parameter to hvm copy in order to solve that. Note that only
hvm_copy_to_guest_phys_vcpu is introduced, because that's the only one at the
moment that's required in order to build a PVHv2 Dom0.

While there, also assert that the passed vcpu belongs to an HVM guest.

Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
---
Changes since v4:
 - New in the series.
---
 xen/arch/x86/hvm/hvm.c            | 40 ++++++++++++++++++++++++---------------
 xen/include/asm-x86/hvm/support.h |  2 ++
 2 files changed, 27 insertions(+), 15 deletions(-)

Comments

Andrew Cooper Jan. 20, 2017, 7:45 p.m. UTC | #1
On 19/01/17 17:29, Roger Pau Monne wrote:
> @@ -3172,9 +3173,9 @@ static enum hvm_copy_result __hvm_copy(
>              {
>                  static unsigned long lastpage;
>                  if ( xchg(&lastpage, gfn) != gfn )
> -                    gdprintk(XENLOG_DEBUG, "guest attempted write to read-only"
> +                    printk(XENLOG_DEBUG, "%pv guest attempted write to read-only"
>                               " memory page. gfn=%#lx, mfn=%#lx\n",
> -                             gfn, page_to_mfn(page));
> +                             vcpu->domain, gfn, page_to_mfn(page));

Just vcpu here, or hitting this printk() will try to follow
d->shared_info as if it were v->domain.

With this fixed, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Roger Pau Monné Jan. 23, 2017, 1:50 p.m. UTC | #2
On Fri, Jan 20, 2017 at 07:45:35PM +0000, Andrew Cooper wrote:
> On 19/01/17 17:29, Roger Pau Monne wrote:
> > @@ -3172,9 +3173,9 @@ static enum hvm_copy_result __hvm_copy(
> >              {
> >                  static unsigned long lastpage;
> >                  if ( xchg(&lastpage, gfn) != gfn )
> > -                    gdprintk(XENLOG_DEBUG, "guest attempted write to read-only"
> > +                    printk(XENLOG_DEBUG, "%pv guest attempted write to read-only"
> >                               " memory page. gfn=%#lx, mfn=%#lx\n",
> > -                             gfn, page_to_mfn(page));
> > +                             vcpu->domain, gfn, page_to_mfn(page));
> 
> Just vcpu here, or hitting this printk() will try to follow
> d->shared_info as if it were v->domain.
>
> With this fixed, Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

Thanks, not sure why I've used domain here instead of vcpu.

Roger.
Jan Beulich Jan. 26, 2017, 12:51 p.m. UTC | #3
>>> On 19.01.17 at 18:29, <roger.pau@citrix.com> wrote:
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -3077,15 +3077,16 @@ void hvm_task_switch(
>  #define HVMCOPY_linear     (1u<<2)
>  static enum hvm_copy_result __hvm_copy(
>      void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec,
> -    pagefault_info_t *pfinfo)
> +    pagefault_info_t *pfinfo, struct vcpu *vcpu)

Please stick to v as the name here, as done almost everywhere else.
It also looks a little odd to me to have this parameter last, when
commonly we put it first. For a copy function I can see the buffer
info to be first, but please consider moving it ahead of flags.

> @@ -3137,12 +3138,12 @@ static enum hvm_copy_result __hvm_copy(
>           * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
>           * - newer Windows (like Server 2012) for HPET accesses.
>           */
> -        if ( !nestedhvm_vcpu_in_guestmode(curr)
> -             && is_hvm_vcpu(curr)
> +        if ( !nestedhvm_vcpu_in_guestmode(vcpu)
> +             && is_hvm_vcpu(vcpu)
>               && hvm_mmio_internal(gpa) )

This must not be called for v != current.

Jan
Jan Beulich Jan. 26, 2017, 1:33 p.m. UTC | #4
>>> On 19.01.17 at 18:29, <roger.pau@citrix.com> wrote:
> --- a/xen/include/asm-x86/hvm/support.h
> +++ b/xen/include/asm-x86/hvm/support.h
> @@ -71,6 +71,8 @@ enum hvm_copy_result hvm_copy_to_guest_phys(
>      paddr_t paddr, void *buf, int size);
>  enum hvm_copy_result hvm_copy_from_guest_phys(
>      void *buf, paddr_t paddr, int size);
> +enum hvm_copy_result hvm_copy_to_guest_phys_vcpu(
> +    paddr_t paddr, void *buf, size_t size, struct vcpu *vcpu);

Btw., there being just five existing callers, I think we should rather
change the existing function instead of adding yet another wrapper
of __hvm_copy().

Jan
Roger Pau Monné Jan. 27, 2017, 2:55 p.m. UTC | #5
On Thu, Jan 26, 2017 at 06:33:11AM -0700, Jan Beulich wrote:
> >>> On 19.01.17 at 18:29, <roger.pau@citrix.com> wrote:
> > --- a/xen/include/asm-x86/hvm/support.h
> > +++ b/xen/include/asm-x86/hvm/support.h
> > @@ -71,6 +71,8 @@ enum hvm_copy_result hvm_copy_to_guest_phys(
> >      paddr_t paddr, void *buf, int size);
> >  enum hvm_copy_result hvm_copy_from_guest_phys(
> >      void *buf, paddr_t paddr, int size);
> > +enum hvm_copy_result hvm_copy_to_guest_phys_vcpu(
> > +    paddr_t paddr, void *buf, size_t size, struct vcpu *vcpu);
> 
> Btw., there being just five existing callers, I think we should rather
> change the existing function instead of adding yet another wrapper
> of __hvm_copy().

Done, together with the changes requested to this patch in the other email.

Thanks, Roger.
diff mbox

Patch

diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 41a44c9..600a04d 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3077,15 +3077,16 @@  void hvm_task_switch(
 #define HVMCOPY_linear     (1u<<2)
 static enum hvm_copy_result __hvm_copy(
     void *buf, paddr_t addr, int size, unsigned int flags, uint32_t pfec,
-    pagefault_info_t *pfinfo)
+    pagefault_info_t *pfinfo, struct vcpu *vcpu)
 {
-    struct vcpu *curr = current;
     unsigned long gfn;
     struct page_info *page;
     p2m_type_t p2mt;
     char *p;
     int count, todo = size;
 
+    ASSERT(has_hvm_container_vcpu(vcpu));
+
     /*
      * XXX Disable for 4.1.0: PV-on-HVM drivers will do grant-table ops
      * such as query_size. Grant-table code currently does copy_to/from_guest
@@ -3110,7 +3111,7 @@  static enum hvm_copy_result __hvm_copy(
 
         if ( flags & HVMCOPY_linear )
         {
-            gfn = paging_gva_to_gfn(curr, addr, &pfec);
+            gfn = paging_gva_to_gfn(vcpu, addr, &pfec);
             if ( gfn == gfn_x(INVALID_GFN) )
             {
                 if ( pfec & PFEC_page_paged )
@@ -3137,12 +3138,12 @@  static enum hvm_copy_result __hvm_copy(
          * - 32-bit WinXP (& older Windows) on AMD CPUs for LAPIC accesses,
          * - newer Windows (like Server 2012) for HPET accesses.
          */
-        if ( !nestedhvm_vcpu_in_guestmode(curr)
-             && is_hvm_vcpu(curr)
+        if ( !nestedhvm_vcpu_in_guestmode(vcpu)
+             && is_hvm_vcpu(vcpu)
              && hvm_mmio_internal(gpa) )
             return HVMCOPY_bad_gfn_to_mfn;
 
-        page = get_page_from_gfn(curr->domain, gfn, &p2mt, P2M_UNSHARE);
+        page = get_page_from_gfn(vcpu->domain, gfn, &p2mt, P2M_UNSHARE);
 
         if ( !page )
             return HVMCOPY_bad_gfn_to_mfn;
@@ -3150,7 +3151,7 @@  static enum hvm_copy_result __hvm_copy(
         if ( p2m_is_paging(p2mt) )
         {
             put_page(page);
-            p2m_mem_paging_populate(curr->domain, gfn);
+            p2m_mem_paging_populate(vcpu->domain, gfn);
             return HVMCOPY_gfn_paged_out;
         }
         if ( p2m_is_shared(p2mt) )
@@ -3172,9 +3173,9 @@  static enum hvm_copy_result __hvm_copy(
             {
                 static unsigned long lastpage;
                 if ( xchg(&lastpage, gfn) != gfn )
-                    gdprintk(XENLOG_DEBUG, "guest attempted write to read-only"
+                    printk(XENLOG_DEBUG, "%pv guest attempted write to read-only"
                              " memory page. gfn=%#lx, mfn=%#lx\n",
-                             gfn, page_to_mfn(page));
+                             vcpu->domain, gfn, page_to_mfn(page));
             }
             else
             {
@@ -3182,7 +3183,7 @@  static enum hvm_copy_result __hvm_copy(
                     memcpy(p, buf, count);
                 else
                     memset(p, 0, count);
-                paging_mark_dirty(curr->domain, _mfn(page_to_mfn(page)));
+                paging_mark_dirty(vcpu->domain, _mfn(page_to_mfn(page)));
             }
         }
         else
@@ -3202,18 +3203,25 @@  static enum hvm_copy_result __hvm_copy(
     return HVMCOPY_okay;
 }
 
+enum hvm_copy_result hvm_copy_to_guest_phys_vcpu(
+    paddr_t paddr, void *buf, size_t size, struct vcpu *vcpu)
+{
+    return __hvm_copy(buf, paddr, size,
+                      HVMCOPY_to_guest | HVMCOPY_phys, 0, NULL, vcpu);
+}
+
 enum hvm_copy_result hvm_copy_to_guest_phys(
     paddr_t paddr, void *buf, int size)
 {
     return __hvm_copy(buf, paddr, size,
-                      HVMCOPY_to_guest | HVMCOPY_phys, 0, NULL);
+                      HVMCOPY_to_guest | HVMCOPY_phys, 0, NULL, current);
 }
 
 enum hvm_copy_result hvm_copy_from_guest_phys(
     void *buf, paddr_t paddr, int size)
 {
     return __hvm_copy(buf, paddr, size,
-                      HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL);
+                      HVMCOPY_from_guest | HVMCOPY_phys, 0, NULL, current);
 }
 
 enum hvm_copy_result hvm_copy_to_guest_linear(
@@ -3222,7 +3230,8 @@  enum hvm_copy_result hvm_copy_to_guest_linear(
 {
     return __hvm_copy(buf, addr, size,
                       HVMCOPY_to_guest | HVMCOPY_linear,
-                      PFEC_page_present | PFEC_write_access | pfec, pfinfo);
+                      PFEC_page_present | PFEC_write_access | pfec, pfinfo,
+                      current);
 }
 
 enum hvm_copy_result hvm_copy_from_guest_linear(
@@ -3231,7 +3240,7 @@  enum hvm_copy_result hvm_copy_from_guest_linear(
 {
     return __hvm_copy(buf, addr, size,
                       HVMCOPY_from_guest | HVMCOPY_linear,
-                      PFEC_page_present | pfec, pfinfo);
+                      PFEC_page_present | pfec, pfinfo, current);
 }
 
 enum hvm_copy_result hvm_fetch_from_guest_linear(
@@ -3240,7 +3249,8 @@  enum hvm_copy_result hvm_fetch_from_guest_linear(
 {
     return __hvm_copy(buf, addr, size,
                       HVMCOPY_from_guest | HVMCOPY_linear,
-                      PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
+                      PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo,
+                      current);
 }
 
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
diff --git a/xen/include/asm-x86/hvm/support.h b/xen/include/asm-x86/hvm/support.h
index ba5899c..1a34d35 100644
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -71,6 +71,8 @@  enum hvm_copy_result hvm_copy_to_guest_phys(
     paddr_t paddr, void *buf, int size);
 enum hvm_copy_result hvm_copy_from_guest_phys(
     void *buf, paddr_t paddr, int size);
+enum hvm_copy_result hvm_copy_to_guest_phys_vcpu(
+    paddr_t paddr, void *buf, size_t size, struct vcpu *vcpu);
 
 /*
  * Copy to/from a guest linear address. @pfec should include PFEC_user_mode