diff mbox

[v6,05/12] x86/domctl: Handle ACPI access from domctl

Message ID 1483452256-2879-6-git-send-email-boris.ostrovsky@oracle.com (mailing list archive)
State New, archived
Headers show

Commit Message

Boris Ostrovsky Jan. 3, 2017, 2:04 p.m. UTC
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
Changes in v6:
* Adjustments to patch 4 changes.
* Added a spinlock for VCPU map access
* Return an error on guest trying to write VCPU map

 xen/arch/x86/hvm/acpi.c          | 57 +++++++++++++++++++++++++++++++++++-----
 xen/include/asm-x86/hvm/domain.h |  1 +
 2 files changed, 52 insertions(+), 6 deletions(-)

Comments

Ross Lagerwall July 31, 2017, 2:14 p.m. UTC | #1
On 01/03/2017 02:04 PM, Boris Ostrovsky wrote:
> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
> ---
> Changes in v6:
> * Adjustments to to patch 4 changes.
> * Added a spinlock for VCPU map access
> * Return an error on guest trying to write VCPU map
> 
snip
> -static int acpi_cpumap_access_common(struct domain *d, bool is_write,
> -                                     unsigned int port,
> +static int acpi_cpumap_access_common(struct domain *d, bool is_guest_access,
> +                                     bool is_write, unsigned int port,
>                                        unsigned int bytes, uint32_t *val)
>   {
>       unsigned int first_byte = port - XEN_ACPI_CPU_MAP;
> +    int rc = X86EMUL_OKAY;
> 
>       BUILD_BUG_ON(XEN_ACPI_CPU_MAP + XEN_ACPI_CPU_MAP_LEN
>                    > ACPI_GPE0_BLK_ADDRESS_V1);
> 
> +    spin_lock(&d->arch.hvm_domain.acpi_lock);
> +
>       if ( !is_write )
>       {
>           uint32_t mask = (bytes < 4) ? ~0U << (bytes * 8) : 0;
> @@ -32,23 +37,61 @@ static int acpi_cpumap_access_common(struct domain *d, bool is_write,
>               memcpy(val, (uint8_t *)d->avail_vcpus + first_byte,
>                      min(bytes, ((d->max_vcpus + 7) / 8) - first_byte));
>       }
> +    else if ( !is_guest_access )
> +        memcpy((uint8_t *)d->avail_vcpus + first_byte, val,
> +               min(bytes, ((d->max_vcpus + 7) / 8) - first_byte));
>       else
>           /* Guests do not write CPU map */
> -        return X86EMUL_UNHANDLEABLE;
> +        rc = X86EMUL_UNHANDLEABLE;
> 
> -    return X86EMUL_OKAY;
> +    spin_unlock(&d->arch.hvm_domain.acpi_lock);
> +
> +    return rc;
>   }
> 
>   int hvm_acpi_domctl_access(struct domain *d,
>                              const struct xen_domctl_acpi_access *access)
>   {
> -    return -ENOSYS;
> +    unsigned int bytes, i;
> +    uint32_t val = 0;
> +    uint8_t *ptr = (uint8_t *)&val;
> +    int rc;
> +    bool is_write = (access->rw == XEN_DOMCTL_ACPI_WRITE) ? true : false;
> +
> +    if ( has_acpi_dm_ff(d) )
> +        return -EOPNOTSUPP;
> +
> +    if ( access->space_id != XEN_ACPI_SYSTEM_IO )
> +        return -EINVAL;
> +
> +    if ( !((access->address >= XEN_ACPI_CPU_MAP) &&
> +           (access->address < XEN_ACPI_CPU_MAP + XEN_ACPI_CPU_MAP_LEN)) )
> +        return -ENODEV;
> +
> +    for ( i = 0; i < access->width; i += sizeof(val) )
> +    {
> +        bytes = (access->width - i > sizeof(val)) ?
> +            sizeof(val) : access->width - i;
> +
> +        if ( is_write && copy_from_guest_offset(ptr, access->val, i, bytes) )
> +            return -EFAULT;
> +
> +        rc = acpi_cpumap_access_common(d, false, is_write,
> +                                       access->address, bytes, &val);

While I'm looking at this code...
This doesn't work if access->width > sizeof(val) (4 bytes). The same 
value (access->address) is always passed into acpi_cpumap_access_common 
for 'port' and this is used as an offset into the avail_vcpus array. So 
the offset is unchanged and only the first 4 bytes of avail_vcpus ever 
get changed.
Boris Ostrovsky July 31, 2017, 2:59 p.m. UTC | #2
On 07/31/2017 10:14 AM, Ross Lagerwall wrote:
> On 01/03/2017 02:04 PM, Boris Ostrovsky wrote:
>> Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
>> ---
>> Changes in v6:
>> * Adjustments to to patch 4 changes.
>> * Added a spinlock for VCPU map access
>> * Return an error on guest trying to write VCPU map
>>
> snip
>> -static int acpi_cpumap_access_common(struct domain *d, bool is_write,
>> -                                     unsigned int port,
>> +static int acpi_cpumap_access_common(struct domain *d, bool
>> is_guest_access,
>> +                                     bool is_write, unsigned int port,
>>                                        unsigned int bytes, uint32_t
>> *val)
>>   {
>>       unsigned int first_byte = port - XEN_ACPI_CPU_MAP;
>> +    int rc = X86EMUL_OKAY;
>>
>>       BUILD_BUG_ON(XEN_ACPI_CPU_MAP + XEN_ACPI_CPU_MAP_LEN
>>                    > ACPI_GPE0_BLK_ADDRESS_V1);
>>
>> +    spin_lock(&d->arch.hvm_domain.acpi_lock);
>> +
>>       if ( !is_write )
>>       {
>>           uint32_t mask = (bytes < 4) ? ~0U << (bytes * 8) : 0;
>> @@ -32,23 +37,61 @@ static int acpi_cpumap_access_common(struct
>> domain *d, bool is_write,
>>               memcpy(val, (uint8_t *)d->avail_vcpus + first_byte,
>>                      min(bytes, ((d->max_vcpus + 7) / 8) - first_byte));
>>       }
>> +    else if ( !is_guest_access )
>> +        memcpy((uint8_t *)d->avail_vcpus + first_byte, val,
>> +               min(bytes, ((d->max_vcpus + 7) / 8) - first_byte));
>>       else
>>           /* Guests do not write CPU map */
>> -        return X86EMUL_UNHANDLEABLE;
>> +        rc = X86EMUL_UNHANDLEABLE;
>>
>> -    return X86EMUL_OKAY;
>> +    spin_unlock(&d->arch.hvm_domain.acpi_lock);
>> +
>> +    return rc;
>>   }
>>
>>   int hvm_acpi_domctl_access(struct domain *d,
>>                              const struct xen_domctl_acpi_access
>> *access)
>>   {
>> -    return -ENOSYS;
>> +    unsigned int bytes, i;
>> +    uint32_t val = 0;
>> +    uint8_t *ptr = (uint8_t *)&val;
>> +    int rc;
>> +    bool is_write = (access->rw == XEN_DOMCTL_ACPI_WRITE) ? true :
>> false;
>> +
>> +    if ( has_acpi_dm_ff(d) )
>> +        return -EOPNOTSUPP;
>> +
>> +    if ( access->space_id != XEN_ACPI_SYSTEM_IO )
>> +        return -EINVAL;
>> +
>> +    if ( !((access->address >= XEN_ACPI_CPU_MAP) &&
>> +           (access->address < XEN_ACPI_CPU_MAP +
>> XEN_ACPI_CPU_MAP_LEN)) )
>> +        return -ENODEV;
>> +
>> +    for ( i = 0; i < access->width; i += sizeof(val) )
>> +    {
>> +        bytes = (access->width - i > sizeof(val)) ?
>> +            sizeof(val) : access->width - i;
>> +
>> +        if ( is_write && copy_from_guest_offset(ptr, access->val, i,
>> bytes) )
>> +            return -EFAULT;
>> +
>> +        rc = acpi_cpumap_access_common(d, false, is_write,
>> +                                       access->address, bytes, &val);
>
> While I'm looking at this code...
> This doesn't work if access->width > sizeof(val) (4 bytes). The same
> value (access->address) is always passed into
> acpi_cpumap_access_common for 'port' and this is used as an offset
> into the avail_vcpus array. So the offset is unchanged and only the
> first 4 bytes of avail_vcpus ever get changed.

I'd have to go back to the series (haven't looked at it since it was
posted back in January) but I think I enforce somewhere size of the
access to fit into 4 bytes. And if not then you are right.


-boris
diff mbox

Patch

diff --git a/xen/arch/x86/hvm/acpi.c b/xen/arch/x86/hvm/acpi.c
index f0a84f9..9f0578e 100644
--- a/xen/arch/x86/hvm/acpi.c
+++ b/xen/arch/x86/hvm/acpi.c
@@ -7,17 +7,22 @@ 
 #include <xen/lib.h>
 #include <xen/sched.h>
 
+#include <asm/guest_access.h>
+
 #include <public/arch-x86/xen.h>
 
-static int acpi_cpumap_access_common(struct domain *d, bool is_write,
-                                     unsigned int port,
+static int acpi_cpumap_access_common(struct domain *d, bool is_guest_access,
+                                     bool is_write, unsigned int port,
                                      unsigned int bytes, uint32_t *val)
 {
     unsigned int first_byte = port - XEN_ACPI_CPU_MAP;
+    int rc = X86EMUL_OKAY;
 
     BUILD_BUG_ON(XEN_ACPI_CPU_MAP + XEN_ACPI_CPU_MAP_LEN
                  > ACPI_GPE0_BLK_ADDRESS_V1);
 
+    spin_lock(&d->arch.hvm_domain.acpi_lock);
+
     if ( !is_write )
     {
         uint32_t mask = (bytes < 4) ? ~0U << (bytes * 8) : 0;
@@ -32,23 +37,61 @@  static int acpi_cpumap_access_common(struct domain *d, bool is_write,
             memcpy(val, (uint8_t *)d->avail_vcpus + first_byte,
                    min(bytes, ((d->max_vcpus + 7) / 8) - first_byte));
     }
+    else if ( !is_guest_access )
+        memcpy((uint8_t *)d->avail_vcpus + first_byte, val,
+               min(bytes, ((d->max_vcpus + 7) / 8) - first_byte));
     else
         /* Guests do not write CPU map */
-        return X86EMUL_UNHANDLEABLE;
+        rc = X86EMUL_UNHANDLEABLE;
 
-    return X86EMUL_OKAY;
+    spin_unlock(&d->arch.hvm_domain.acpi_lock);
+
+    return rc;
 }
 
 int hvm_acpi_domctl_access(struct domain *d,
                            const struct xen_domctl_acpi_access *access)
 {
-    return -ENOSYS;
+    unsigned int bytes, i;
+    uint32_t val = 0;
+    uint8_t *ptr = (uint8_t *)&val;
+    int rc;
+    bool is_write = (access->rw == XEN_DOMCTL_ACPI_WRITE) ? true : false;
+
+    if ( has_acpi_dm_ff(d) )
+        return -EOPNOTSUPP;
+
+    if ( access->space_id != XEN_ACPI_SYSTEM_IO )
+        return -EINVAL;
+
+    if ( !((access->address >= XEN_ACPI_CPU_MAP) &&
+           (access->address < XEN_ACPI_CPU_MAP + XEN_ACPI_CPU_MAP_LEN)) )
+        return -ENODEV;
+
+    for ( i = 0; i < access->width; i += sizeof(val) )
+    {
+        bytes = (access->width - i > sizeof(val)) ?
+            sizeof(val) : access->width - i;
+
+        if ( is_write && copy_from_guest_offset(ptr, access->val, i, bytes) )
+            return -EFAULT;
+
+        rc = acpi_cpumap_access_common(d, false, is_write,
+                                       access->address, bytes, &val);
+        if ( rc )
+            return rc;
+
+        if ( !is_write && copy_to_guest_offset(access->val, i, ptr, bytes) )
+            return -EFAULT;
+    }
+
+    return 0;
 }
 
 static int acpi_cpumap_guest_access(int dir, unsigned int port,
                                     unsigned int bytes, uint32_t *val)
 {
-    return  acpi_cpumap_access_common(current->domain,
+    return  acpi_cpumap_access_common(current->domain, true,
                                       (dir == IOREQ_WRITE) ? true : false,
                                       port, bytes, val);
 }
@@ -148,6 +191,8 @@  void hvm_acpi_init(struct domain *d)
                             sizeof(d->arch.hvm_domain.acpi.pm1a_sts) +
                             sizeof(d->arch.hvm_domain.acpi.pm1a_en),
                             acpi_guest_access);
+
+    spin_lock_init(&d->arch.hvm_domain.acpi_lock);
 }
 
 /*
diff --git a/xen/include/asm-x86/hvm/domain.h b/xen/include/asm-x86/hvm/domain.h
index 07815b6..438ea12 100644
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -111,6 +111,7 @@  struct hvm_domain {
      */
 #define hvm_hw_acpi hvm_hw_pmtimer
     struct hvm_hw_acpi     acpi;
+    spinlock_t             acpi_lock;
 
     /* VCPU which is current target for 8259 interrupts. */
     struct vcpu           *i8259_target;