
VT-x: simplify/clarify vmx_load_pdptrs()

Message ID b2bd8c84-9109-8b21-afb4-51e49b271a83@suse.com (mailing list archive)
State New, archived
Series VT-x: simplify/clarify vmx_load_pdptrs()

Commit Message

Jan Beulich June 18, 2020, 6:38 a.m. UTC
* Guests outside of long mode can't have PCID enabled. Drop the
  respective check to make more obvious that there's no security issue
  (from potentially accessing past the mapped page's boundary).

* Only the low 32 bits of CR3 are relevant outside of long mode, even
  if they remained unchanged after leaving that mode.

* Drop the unnecessary and badly typed local variable p.

* Don't open-code hvm_long_mode_active() (and extend this to the related
  nested VT-x code).

* Constify guest_pdptes to clarify that we're only reading from the
  page.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
This is intentionally not addressing any of the other shortcomings of
the function, as was done by the previously posted
https://lists.xenproject.org/archives/html/xen-devel/2018-07/msg01790.html.
This will need to be the subject of a further change.

Comments

Roger Pau Monné June 18, 2020, 10:11 a.m. UTC | #1
On Thu, Jun 18, 2020 at 08:38:27AM +0200, Jan Beulich wrote:
> * Guests outside of long mode can't have PCID enabled. Drop the
>   respective check to make more obvious that there's no security issue
>   (from potentially accessing past the mapped page's boundary).
> 
> * Only the low 32 bits of CR3 are relevant outside of long mode, even
>   if they remained unchanged after leaving that mode.
> 
> * Drop the unnecessary and badly typed local variable p.
> 
> * Don't open-code hvm_long_mode_active() (and extend this to the related
>   nested VT-x code).
> 
> * Constify guest_pdptes to clarify that we're only reading from the
>   page.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

It's no worse than what was there before, yet I have a question.

> ---
> This is intentionally not addressing any of the other shortcomings of
> the function, as was done by the previously posted
> https://lists.xenproject.org/archives/html/xen-devel/2018-07/msg01790.html.
> This will need to be the subject of a further change.
> 
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -1312,17 +1312,16 @@ static void vmx_set_interrupt_shadow(str
>  
>  static void vmx_load_pdptrs(struct vcpu *v)
>  {
> -    unsigned long cr3 = v->arch.hvm.guest_cr[3];
> -    uint64_t *guest_pdptes;
> +    uint32_t cr3 = v->arch.hvm.guest_cr[3];
> +    const uint64_t *guest_pdptes;
>      struct page_info *page;
>      p2m_type_t p2mt;
> -    char *p;
>  
>      /* EPT needs to load PDPTRS into VMCS for PAE. */
> -    if ( !hvm_pae_enabled(v) || (v->arch.hvm.guest_efer & EFER_LMA) )
> +    if ( !hvm_pae_enabled(v) || hvm_long_mode_active(v) )
>          return;
>  
> -    if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
> +    if ( cr3 & 0x1f )
>          goto crash;

Intel SDM says bits 0 to 4 are ignored, not reserved, so I'm not sure
we need to crash the guest here. I have no reference for how real hardware
behaves here, so maybe crashing is the right thing to do.

>  
>      page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
> @@ -1332,14 +1331,12 @@ static void vmx_load_pdptrs(struct vcpu
>           * queue, but this is the wrong place. We're holding at least
>           * the paging lock */
>          gdprintk(XENLOG_ERR,
> -                 "Bad cr3 on load pdptrs gfn %lx type %d\n",
> +                 "Bad cr3 on load pdptrs gfn %"PRIx32" type %d\n",
>                   cr3 >> PAGE_SHIFT, (int) p2mt);
>          goto crash;
>      }
>  
> -    p = __map_domain_page(page);
> -
> -    guest_pdptes = (uint64_t *)(p + (cr3 & ~PAGE_MASK));
> +    guest_pdptes = __map_domain_page(page) + (cr3 & ~PAGE_MASK);

Instead I think this could be:

guest_pdptes = __map_domain_page(page) + (cr3 & ~(PAGE_MASK | 0x1f));

Thanks, Roger.
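For illustration, a minimal standalone sketch of the offset arithmetic Roger
suggests above (not part of the patch; the PAGE_SIZE/PAGE_MASK definitions
follow the usual ~(PAGE_SIZE - 1) convention and the CR3 value is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        /* Example PAE CR3 with some of the ignored bits 4:0 set. */
        uint32_t cr3 = 0x0012a01b;

        uint32_t raw_off  = cr3 & ~PAGE_MASK;          /* 0x01b: keeps the ignored bits */
        uint32_t pdpt_off = cr3 & ~(PAGE_MASK | 0x1f); /* 0x000: 32-byte aligned offset */

        printf("gfn %#x, raw offset %#x, PDPT offset %#x\n",
               cr3 >> 12, raw_off, pdpt_off);
        return 0;
    }

Masking the ignored bits this way keeps the mapped pointer on the 32-byte
PDPTE table boundary instead of crashing the guest for having set them.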
Jan Beulich June 18, 2020, 11:49 a.m. UTC | #2
On 18.06.2020 12:11, Roger Pau Monné wrote:
> On Thu, Jun 18, 2020 at 08:38:27AM +0200, Jan Beulich wrote:
>> * Guests outside of long mode can't have PCID enabled. Drop the
>>   respective check to make more obvious that there's no security issue
>>   (from potentially accessing past the mapped page's boundary).
>>
>> * Only the low 32 bits of CR3 are relevant outside of long mode, even
>>   if they remained unchanged after leaving that mode.
>>
>> * Drop the unnecessary and badly typed local variable p.
>>
>> * Don't open-code hvm_long_mode_active() (and extend this to the related
>>   nested VT-x code).
>>
>> * Constify guest_pdptes to clarify that we're only reading from the
>>   page.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> 
> Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>

Thanks.

>> --- a/xen/arch/x86/hvm/vmx/vmx.c
>> +++ b/xen/arch/x86/hvm/vmx/vmx.c
>> @@ -1312,17 +1312,16 @@ static void vmx_set_interrupt_shadow(str
>>  
>>  static void vmx_load_pdptrs(struct vcpu *v)
>>  {
>> -    unsigned long cr3 = v->arch.hvm.guest_cr[3];
>> -    uint64_t *guest_pdptes;
>> +    uint32_t cr3 = v->arch.hvm.guest_cr[3];
>> +    const uint64_t *guest_pdptes;
>>      struct page_info *page;
>>      p2m_type_t p2mt;
>> -    char *p;
>>  
>>      /* EPT needs to load PDPTRS into VMCS for PAE. */
>> -    if ( !hvm_pae_enabled(v) || (v->arch.hvm.guest_efer & EFER_LMA) )
>> +    if ( !hvm_pae_enabled(v) || hvm_long_mode_active(v) )
>>          return;
>>  
>> -    if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
>> +    if ( cr3 & 0x1f )
>>          goto crash;
> 
> Intel SDM says bits 0 to 4 are ignored, not reserved, so I'm not sure
> we need to crash the guest here. I have no reference for how real hardware
> behaves here, so maybe crashing is the right thing to do.

No, I was misled by the wording of the description in the old
patch I've referenced. IOW ...

>> @@ -1332,14 +1331,12 @@ static void vmx_load_pdptrs(struct vcpu
>>           * queue, but this is the wrong place. We're holding at least
>>           * the paging lock */
>>          gdprintk(XENLOG_ERR,
>> -                 "Bad cr3 on load pdptrs gfn %lx type %d\n",
>> +                 "Bad cr3 on load pdptrs gfn %"PRIx32" type %d\n",
>>                   cr3 >> PAGE_SHIFT, (int) p2mt);
>>          goto crash;
>>      }
>>  
>> -    p = __map_domain_page(page);
>> -
>> -    guest_pdptes = (uint64_t *)(p + (cr3 & ~PAGE_MASK));
>> +    guest_pdptes = __map_domain_page(page) + (cr3 & ~PAGE_MASK);
> 
> Instead I think this could be:
> 
> guest_pdptes = __map_domain_page(page) + (cr3 & ~(PAGE_MASK | 0x1f));

... I agree and will change to this; I'll assume your R-b stands
with the change made (and the description adjusted accordingly).

Jan
Roger Pau Monné June 18, 2020, 12:39 p.m. UTC | #3
On Thu, Jun 18, 2020 at 01:49:58PM +0200, Jan Beulich wrote:
> On 18.06.2020 12:11, Roger Pau Monné wrote:
> > On Thu, Jun 18, 2020 at 08:38:27AM +0200, Jan Beulich wrote:
> >> -    guest_pdptes = (uint64_t *)(p + (cr3 & ~PAGE_MASK));
> >> +    guest_pdptes = __map_domain_page(page) + (cr3 & ~PAGE_MASK);
> > 
> > Instead I think this could be:
> > 
> > guest_pdptes = __map_domain_page(page) + (cr3 & ~(PAGE_MASK | 0x1f));
> 
> ... I agree and will change to this; I'll assume your R-b stands
> with the change made (and the description adjusted accordingly).

Sure.

Thanks, Roger.
Tian, Kevin June 19, 2020, 1:27 a.m. UTC | #4
> From: Jan Beulich <jbeulich@suse.com>
> Sent: Thursday, June 18, 2020 2:38 PM
> 
> * Guests outside of long mode can't have PCID enabled. Drop the
>   respective check to make more obvious that there's no security issue
>   (from potentially accessing past the mapped page's boundary).
> 
> * Only the low 32 bits of CR3 are relevant outside of long mode, even
>   if they remained unchanged after leaving that mode.
> 
> * Drop the unnecessary and badly typed local variable p.
> 
> * Don't open-code hvm_long_mode_active() (and extend this to the related
>   nested VT-x code).
> 
> * Constify guest_pdptes to clarify that we're only reading from the
>   page.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>

> ---
> This is intentionally not addressing any of the other shortcomings of
> the function, as was done by the previously posted
> https://lists.xenproject.org/archives/html/xen-devel/2018-07/msg01790.html.
> This will need to be the subject of a further change.
> 
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -1312,17 +1312,16 @@ static void vmx_set_interrupt_shadow(str
> 
>  static void vmx_load_pdptrs(struct vcpu *v)
>  {
> -    unsigned long cr3 = v->arch.hvm.guest_cr[3];
> -    uint64_t *guest_pdptes;
> +    uint32_t cr3 = v->arch.hvm.guest_cr[3];
> +    const uint64_t *guest_pdptes;
>      struct page_info *page;
>      p2m_type_t p2mt;
> -    char *p;
> 
>      /* EPT needs to load PDPTRS into VMCS for PAE. */
> -    if ( !hvm_pae_enabled(v) || (v->arch.hvm.guest_efer & EFER_LMA) )
> +    if ( !hvm_pae_enabled(v) || hvm_long_mode_active(v) )
>          return;
> 
> -    if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
> +    if ( cr3 & 0x1f )
>          goto crash;
> 
>      page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
> @@ -1332,14 +1331,12 @@ static void vmx_load_pdptrs(struct vcpu
>           * queue, but this is the wrong place. We're holding at least
>           * the paging lock */
>          gdprintk(XENLOG_ERR,
> -                 "Bad cr3 on load pdptrs gfn %lx type %d\n",
> +                 "Bad cr3 on load pdptrs gfn %"PRIx32" type %d\n",
>                   cr3 >> PAGE_SHIFT, (int) p2mt);
>          goto crash;
>      }
> 
> -    p = __map_domain_page(page);
> -
> -    guest_pdptes = (uint64_t *)(p + (cr3 & ~PAGE_MASK));
> +    guest_pdptes = __map_domain_page(page) + (cr3 & ~PAGE_MASK);
> 
>      /*
>       * We do not check the PDPTRs for validity. The CPU will do this during
> @@ -1356,7 +1353,7 @@ static void vmx_load_pdptrs(struct vcpu
> 
>      vmx_vmcs_exit(v);
> 
> -    unmap_domain_page(p);
> +    unmap_domain_page(guest_pdptes);
>      put_page(page);
>      return;
> 
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -1234,7 +1234,7 @@ static void virtual_vmentry(struct cpu_u
>          paging_update_paging_modes(v);
> 
>      if ( nvmx_ept_enabled(v) && hvm_pae_enabled(v) &&
> -         !(v->arch.hvm.guest_efer & EFER_LMA) )
> +         !hvm_long_mode_active(v) )
>          vvmcs_to_shadow_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
> 
>      regs->rip = get_vvmcs(v, GUEST_RIP);
> @@ -1437,7 +1437,7 @@ static void virtual_vmexit(struct cpu_us
>      sync_exception_state(v);
> 
>      if ( nvmx_ept_enabled(v) && hvm_pae_enabled(v) &&
> -         !(v->arch.hvm.guest_efer & EFER_LMA) )
> +         !hvm_long_mode_active(v) )
>          shadow_to_vvmcs_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
> 
>      /* This will clear current pCPU bit in p2m->dirty_cpumask */

Patch

--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1312,17 +1312,16 @@  static void vmx_set_interrupt_shadow(str
 
 static void vmx_load_pdptrs(struct vcpu *v)
 {
-    unsigned long cr3 = v->arch.hvm.guest_cr[3];
-    uint64_t *guest_pdptes;
+    uint32_t cr3 = v->arch.hvm.guest_cr[3];
+    const uint64_t *guest_pdptes;
     struct page_info *page;
     p2m_type_t p2mt;
-    char *p;
 
     /* EPT needs to load PDPTRS into VMCS for PAE. */
-    if ( !hvm_pae_enabled(v) || (v->arch.hvm.guest_efer & EFER_LMA) )
+    if ( !hvm_pae_enabled(v) || hvm_long_mode_active(v) )
         return;
 
-    if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) )
+    if ( cr3 & 0x1f )
         goto crash;
 
     page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
@@ -1332,14 +1331,12 @@  static void vmx_load_pdptrs(struct vcpu
          * queue, but this is the wrong place. We're holding at least
          * the paging lock */
         gdprintk(XENLOG_ERR,
-                 "Bad cr3 on load pdptrs gfn %lx type %d\n",
+                 "Bad cr3 on load pdptrs gfn %"PRIx32" type %d\n",
                  cr3 >> PAGE_SHIFT, (int) p2mt);
         goto crash;
     }
 
-    p = __map_domain_page(page);
-
-    guest_pdptes = (uint64_t *)(p + (cr3 & ~PAGE_MASK));
+    guest_pdptes = __map_domain_page(page) + (cr3 & ~PAGE_MASK);
 
     /*
      * We do not check the PDPTRs for validity. The CPU will do this during
@@ -1356,7 +1353,7 @@  static void vmx_load_pdptrs(struct vcpu
 
     vmx_vmcs_exit(v);
 
-    unmap_domain_page(p);
+    unmap_domain_page(guest_pdptes);
     put_page(page);
     return;
 
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1234,7 +1234,7 @@  static void virtual_vmentry(struct cpu_u
         paging_update_paging_modes(v);
 
     if ( nvmx_ept_enabled(v) && hvm_pae_enabled(v) &&
-         !(v->arch.hvm.guest_efer & EFER_LMA) )
+         !hvm_long_mode_active(v) )
         vvmcs_to_shadow_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
 
     regs->rip = get_vvmcs(v, GUEST_RIP);
@@ -1437,7 +1437,7 @@  static void virtual_vmexit(struct cpu_us
     sync_exception_state(v);
 
     if ( nvmx_ept_enabled(v) && hvm_pae_enabled(v) &&
-         !(v->arch.hvm.guest_efer & EFER_LMA) )
+         !hvm_long_mode_active(v) )
         shadow_to_vvmcs_bulk(v, ARRAY_SIZE(gpdpte_fields), gpdpte_fields);
 
     /* This will clear current pCPU bit in p2m->dirty_cpumask */
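For reference, a condensed sketch of how vmx_load_pdptrs() would read with
this patch plus the follow-up mask change agreed in the thread. The VMCS
enter/PDPTE-write sequence left unchanged by the patch is only indicated by
a comment, and the error/crash path is reconstructed from the "goto crash"
in the hunks, so this is a sketch rather than the committed version:

    static void vmx_load_pdptrs(struct vcpu *v)
    {
        uint32_t cr3 = v->arch.hvm.guest_cr[3];
        const uint64_t *guest_pdptes;
        struct page_info *page;
        p2m_type_t p2mt;

        /* EPT needs to load PDPTRS into VMCS for PAE. */
        if ( !hvm_pae_enabled(v) || hvm_long_mode_active(v) )
            return;

        page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE);
        if ( !page )
        {
            gdprintk(XENLOG_ERR,
                     "Bad cr3 on load pdptrs gfn %"PRIx32" type %d\n",
                     cr3 >> PAGE_SHIFT, (int) p2mt);
            goto crash;
        }

        /* CR3 bits 4:0 are ignored in this mode, so mask them off. */
        guest_pdptes = __map_domain_page(page) + (cr3 & ~(PAGE_MASK | 0x1f));

        /* ... vmx_vmcs_enter(v) and the PDPTE loads into the VMCS (unchanged) ... */

        vmx_vmcs_exit(v);

        unmap_domain_page(guest_pdptes);
        put_page(page);
        return;

     crash:
        domain_crash(v->domain);
    }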