
[v6,3/4] x86/ioreq server: Handle read-modify-write cases for p2m_ioreq_server pages.

Message ID 1472813240-11011-4-git-send-email-yu.c.zhang@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Yu Zhang Sept. 2, 2016, 10:47 a.m. UTC
In ept_handle_violation(), write violations are also treated as
read violations. So when a VM accesses a write-protected address
with a read-modify-write instruction, the read emulation process
is triggered first.

For p2m_ioreq_server pages, the current ioreq server only forwards
write operations to the device model. Therefore, when such a page
is accessed by a read-modify-write instruction, the read operations
should be emulated here in the hypervisor. This patch provides such
a handler to copy the data to the buffer.

Note: MMIOs with p2m_mmio_dm type do not need such special treatment
because both reads and writes will go to the device model.

Signed-off-by: Paul Durrant <paul.durrant@citrix.com>
Signed-off-by: Yu Zhang <yu.c.zhang@linux.intel.com>
---
Cc: Paul Durrant <paul.durrant@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Andrew Cooper <andrew.cooper3@citrix.com>
---
 xen/arch/x86/hvm/emulate.c | 45 ++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 44 insertions(+), 1 deletion(-)

Comments

Jan Beulich Sept. 5, 2016, 2:10 p.m. UTC | #1
>>> On 02.09.16 at 12:47, <yu.c.zhang@linux.intel.com> wrote:
> --- a/xen/arch/x86/hvm/emulate.c
> +++ b/xen/arch/x86/hvm/emulate.c
> @@ -95,6 +95,41 @@ static const struct hvm_io_handler null_handler = {
>      .ops = &null_ops
>  };
>  
> +static int mem_read(const struct hvm_io_handler *io_handler,
> +                    uint64_t addr,
> +                    uint32_t size,
> +                    uint64_t *data)
> +{
> +    struct domain *currd = current->domain;
> +    unsigned long gmfn = paddr_to_pfn(addr);
> +    unsigned long offset = addr & ~PAGE_MASK;
> +    struct page_info *page = get_page_from_gfn(currd, gmfn, NULL, P2M_UNSHARE);
> +    uint8_t *p;

const (and preferably also void)

> +    ASSERT(offset + size < PAGE_SIZE);

Surely <= ?

> +    if ( !page )
> +        return X86EMUL_UNHANDLEABLE;
> +
> +    p = __map_domain_page(page);
> +    p += offset;
> +    memcpy(data, p, size);
> +
> +    unmap_domain_page(p);
> +    put_page(page);

But anyway - I think rather than all this open coding you would
better call hvm_copy_from_guest_phys().
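I.e. the body might then shrink to something along these lines (a rough,
untested sketch):

    if ( hvm_copy_from_guest_phys(data, addr, size) != HVMCOPY_okay )
        return X86EMUL_UNHANDLEABLE;

    return X86EMUL_OKAY;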

> +static const struct hvm_io_ops mem_ops = {
> +    .read = mem_read,
> +    .write = null_write
> +};
> +
> +static const struct hvm_io_handler mem_handler = {
> +    .ops = &mem_ops
> +};

I think the mem_ prefix for both objects is a bad one, considering
that this isn't suitable for general memory handling.

> @@ -204,7 +239,15 @@ static int hvmemul_do_io(
>          /* If there is no suitable backing DM, just ignore accesses */
>          if ( !s )
>          {
> -            rc = hvm_process_io_intercept(&null_handler, &p);
> +            /*
> +             * For p2m_ioreq_server pages accessed with read-modify-write
> +             * instructions, we provide a read handler to copy the data to
> +             * the buffer.
> +             */
> +            if ( p2mt == p2m_ioreq_server )

Please add unlikely() here, or aid the compiler in avoiding any
branch by ...

> +                rc = hvm_process_io_intercept(&mem_handler, &p);
> +            else
> +                rc = hvm_process_io_intercept(&null_handler, &p);

... using a conditional expression for the first function argument.
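E.g., roughly (untested, and with whatever names the handlers end up
having):

    rc = hvm_process_io_intercept(p2mt == p2m_ioreq_server ?
                                  &mem_handler : &null_handler,
                                  &p);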

And the comment ahead of the if() now also needs adjustment
(perhaps you want to merge the one you add into that one).

Jan
Yu Zhang Sept. 9, 2016, 6:21 a.m. UTC | #2
On 9/9/2016 1:26 PM, Yu Zhang wrote:
>
> >>> On 02.09.16 at 12:47, <yu.c.zhang@xxxxxxxxxxxxxxx> wrote:
> > --- a/xen/arch/x86/hvm/emulate.c
> > +++ b/xen/arch/x86/hvm/emulate.c
> > @@ -95,6 +95,41 @@ static const struct hvm_io_handler null_handler = {
> >      .ops = &null_ops
> >  };
> >
> > +static int mem_read(const struct hvm_io_handler *io_handler,
> > +                    uint64_t addr,
> > +                    uint32_t size,
> > +                    uint64_t *data)
> > +{
> > +    struct domain *currd = current->domain;
> > +    unsigned long gmfn = paddr_to_pfn(addr);
> > +    unsigned long offset = addr & ~PAGE_MASK;
> > +    struct page_info *page = get_page_from_gfn(currd, gmfn, NULL,
> > P2M_UNSHARE);
> > +    uint8_t *p;
>
> const (and preferably also void)
>

Thanks, I think we do not need this variable with 
hvm_copy_from_guest_phys().

> > +    ASSERT(offset + size < PAGE_SIZE);
>
> Surely <= ?
>

Yes. Thanks.

> > +    if ( !page )
> > +        return X86EMUL_UNHANDLEABLE;
> > +
> > +    p = __map_domain_page(page);
> > +    p += offset;
> > +    memcpy(data, p, size);
> > +
> > +    unmap_domain_page(p);
> > +    put_page(page);
>
> But anyway - I think rather than all this open coding you would
> better call hvm_copy_from_guest_phys().
>

Agree.

> > +static const struct hvm_io_ops mem_ops = {
> > +    .read = mem_read,
> > +    .write = null_write
> > +};
> > +
> > +static const struct hvm_io_handler mem_handler = {
> > +    .ops = &mem_ops
> > +};
>
> I think the mem_ prefix for both objects is a bad one, considering
> that this isn't suitable for general memory handling.

How about ioreq_server_read/ops? It is only for this special p2m type.
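Combined with the hvm_copy_from_guest_phys() change above, the result
would look roughly like this (a rough sketch, not tested yet):

static int ioreq_server_read(const struct hvm_io_handler *io_handler,
                             uint64_t addr,
                             uint32_t size,
                             uint64_t *data)
{
    if ( hvm_copy_from_guest_phys(data, addr, size) != HVMCOPY_okay )
        return X86EMUL_UNHANDLEABLE;

    return X86EMUL_OKAY;
}

static const struct hvm_io_ops ioreq_server_ops = {
    .read = ioreq_server_read,
    .write = null_write
};

static const struct hvm_io_handler ioreq_server_handler = {
    .ops = &ioreq_server_ops
};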

>
> > @@ -204,7 +239,15 @@ static int hvmemul_do_io(
> >          /* If there is no suitable backing DM, just ignore accesses */
> >          if ( !s )
> >          {
> > -            rc = hvm_process_io_intercept(&null_handler, &p);
> > +            /*
> > +             * For p2m_ioreq_server pages accessed with read-modify-write
> > +             * instructions, we provide a read handler to copy the data to
> > +             * the buffer.
> > +             */
> > +            if ( p2mt == p2m_ioreq_server )
>
> Please add unlikely() here, or aid the compiler in avoiding any
> branch by ...
>
> > +                rc = hvm_process_io_intercept(&mem_handler, &p);
> > +            else
> > +                rc = hvm_process_io_intercept(&null_handler, &p);
>
> ... using a conditional expression for the first function argument.
>
OK. I prefer to add the unlikely().

> And the comment ahead of the if() now also needs adjustment
> (perhaps you want to merge the one you add into that one).
>

OK. And IIUC, you mean merging it into the original comment above the "if ( !s )"?
Like this:
         /*
          * For p2m_ioreq_server pages accessed with read-modify-write
          * instructions, we provide a read handler to copy the data to
          * the buffer. For other cases, if there is no suitable backing
          * DM, we just ignore accesses.
          */
         if ( !s )
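
And inside that if(), with the unlikely() and the renamed handler,
roughly (untested):

             if ( unlikely(p2mt == p2m_ioreq_server) )
                 rc = hvm_process_io_intercept(&ioreq_server_handler, &p);
             else
                 rc = hvm_process_io_intercept(&null_handler, &p);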

> Jan
>

Thanks
Yu
Jan Beulich Sept. 9, 2016, 8:12 a.m. UTC | #3
>>> On 09.09.16 at 08:21, <yu.c.zhang@linux.intel.com> wrote:
> On 9/9/2016 1:26 PM, Yu Zhang wrote:
>> >>> On 02.09.16 at 12:47, <yu.c.zhang@xxxxxxxxxxxxxxx> wrote:
>> > +static const struct hvm_io_ops mem_ops = {
>> > +    .read = mem_read,
>> > +    .write = null_write
>> > +};
>> > +
>> > +static const struct hvm_io_handler mem_handler = {
>> > +    .ops = &mem_ops
>> > +};
>>
>> I think the mem_ prefix for both objects is a bad one, considering
>> that this isn't suitable for general memory handling.
> 
> How about ioreq_server_read/ops? It is only for this special p2m type.

SGTM.

>> And the comment ahead of the if() now also needs adjustment
>> (perhaps you want to merge the one you add into that one).
>>
> 
> OK. And IIUC, you mean merge to the original comments above the "if (!s)"?
> Like this:
>          /*
>           * For p2m_ioreq_server pages accessed with read-modify-write
>           * instructions, we provide a read handler to copy the data to
>           * the buffer. For other cases, if there is no suitable backing
>           * DM, we just ignore accesses.
>           */
>          if ( !s )

Yes, thanks.

Jan

Patch

diff --git a/xen/arch/x86/hvm/emulate.c b/xen/arch/x86/hvm/emulate.c
index a33346e..f92de48 100644
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -95,6 +95,41 @@  static const struct hvm_io_handler null_handler = {
     .ops = &null_ops
 };
 
+static int mem_read(const struct hvm_io_handler *io_handler,
+                    uint64_t addr,
+                    uint32_t size,
+                    uint64_t *data)
+{
+    struct domain *currd = current->domain;
+    unsigned long gmfn = paddr_to_pfn(addr);
+    unsigned long offset = addr & ~PAGE_MASK;
+    struct page_info *page = get_page_from_gfn(currd, gmfn, NULL, P2M_UNSHARE);
+    uint8_t *p;
+
+    ASSERT(offset + size < PAGE_SIZE);
+
+    if ( !page )
+        return X86EMUL_UNHANDLEABLE;
+
+    p = __map_domain_page(page);
+    p += offset;
+    memcpy(data, p, size);
+
+    unmap_domain_page(p);
+    put_page(page);
+
+    return X86EMUL_OKAY;
+}
+
+static const struct hvm_io_ops mem_ops = {
+    .read = mem_read,
+    .write = null_write
+};
+
+static const struct hvm_io_handler mem_handler = {
+    .ops = &mem_ops
+};
+
 static int hvmemul_do_io(
     bool_t is_mmio, paddr_t addr, unsigned long *reps, unsigned int size,
     uint8_t dir, bool_t df, bool_t data_is_addr, uintptr_t data)
@@ -204,7 +239,15 @@  static int hvmemul_do_io(
         /* If there is no suitable backing DM, just ignore accesses */
         if ( !s )
         {
-            rc = hvm_process_io_intercept(&null_handler, &p);
+            /*
+             * For p2m_ioreq_server pages accessed with read-modify-write
+             * instructions, we provide a read handler to copy the data to
+             * the buffer.
+             */
+            if ( p2mt == p2m_ioreq_server )
+                rc = hvm_process_io_intercept(&mem_handler, &p);
+            else
+                rc = hvm_process_io_intercept(&null_handler, &p);
             vio->io_req.state = STATE_IOREQ_NONE;
         }
         else