
[5/5] x86/HVM: improve CET-IBT pruning of ENDBR

Message ID 40585213-99ac-43b4-9432-03d739ec452c@suse.com (mailing list archive)
State New, archived
Series x86/HVM: misc tidying

Commit Message

Jan Beulich Nov. 16, 2023, 1:33 p.m. UTC
__init{const,data}_cf_clobber can have an effect only for pointers
actually populated in the respective tables. While not the case for SVM
right now, VMX installs a number of pointers only under certain
conditions. Hence the respective functions would have their ENDBR purged
only when those conditions are met. Invoke "pruning" functions after
having copied the respective tables, for them to install any "missing"
pointers.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
This is largely cosmetic for present hardware, which when supporting
CET-IBT likely also supports all of the advanced VMX features for which
hook pointers are installed conditionally. The only case this would make
a difference there is when use of respective features was suppressed via
command line option (where available). For future hooks it may end up
relevant even by default, and it also would be if AMD started supporting
CET-IBT; right now it matters only for .pi_update_irte, as iommu_intpost
continues to default to off.

Originally I had meant to put the SVM and VMX functions in presmp-
initcalls, but hvm/{svm,vmx}/built_in.o are linked into hvm/built_in.o
before hvm/hvm.o. And I don't think I want to fiddle with link order
here.
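
For context, the clobbering referenced above is performed by _apply_alternatives(): structures tagged __init{const,data}_cf_clobber are placed in a dedicated section which is walked as if it were an array of pointers, and every target that begins with ENDBR64 has that instruction overwritten, so the function can no longer be reached via an indirect branch. A rough sketch of that loop follows; the identifiers are approximated from memory rather than quoted verbatim from xen/arch/x86/alternative.c:

    if ( cpu_has_xen_ibt )
    {
        void *const *val;

        /* Walk .init.{ro,}data.cf_clobber as if it were an array of pointers. */
        for ( val = __initdata_cf_clobber_start;
              val < __initdata_cf_clobber_end;
              val++ )
        {
            void *ptr = *val;

            /*
             * Hooks left at NULL never reach the poisoning step - which is
             * exactly why this patch installs the "missing" pointers first.
             */
            if ( !ptr || !is_endbr64(ptr) )
                continue;

            place_endbr64_poison(ptr);
        }
    }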

Comments

Roger Pau Monné Nov. 22, 2023, 10:08 a.m. UTC | #1
On Thu, Nov 16, 2023 at 02:33:14PM +0100, Jan Beulich wrote:
> __init{const,data}_cf_clobber can have an effect only for pointers
> actually populated in the respective tables. While not the case for SVM
> right now, VMX installs a number of pointers only under certain
> conditions. Hence the respective functions would have their ENDBR purged
> only when those conditions are met. Invoke "pruning" functions after
> having copied the respective tables, for them to install any "missing"
> pointers.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> This is largely cosmetic for present hardware, which when supporting
> CET-IBT likely also supports all of the advanced VMX features for which
> hook pointers are installed conditionally. The only case this would make
> a difference there is when use of respective features was suppressed via
> command line option (where available). For future hooks it may end up
> relevant even by default, and it also would be if AMD started supporting
> CET-IBT; right now it matters only for .pi_update_irte, as iommu_intpost
> continues to default to off.
> 
> Originally I had meant to put the SVM and VMX functions in presmp-
> initcalls, but hvm/{svm,vmx}/built_in.o are linked into hvm/built_in.o
> before hvm/hvm.o. And I don't think I want to fiddle with link order
> here.
> 
> --- a/xen/arch/x86/hvm/hvm.c
> +++ b/xen/arch/x86/hvm/hvm.c
> @@ -161,10 +161,15 @@ static int __init cf_check hvm_enable(vo
>      else if ( cpu_has_svm )
>          fns = start_svm();
>  
> +    if ( fns )
> +        hvm_funcs = *fns;
> +
> +    prune_vmx();
> +    prune_svm();
> +
>      if ( fns == NULL )
>          return 0;
>  
> -    hvm_funcs = *fns;
>      hvm_enabled = 1;
>  
>      printk("HVM: %s enabled\n", fns->name);
> --- a/xen/arch/x86/hvm/svm/svm.c
> +++ b/xen/arch/x86/hvm/svm/svm.c
> @@ -2587,6 +2587,19 @@ const struct hvm_function_table * __init
>      return &svm_function_table;
>  }
>  
> +void __init prune_svm(void)
> +{
> +    /*
> +     * Now that svm_function_table was copied, populate all function pointers
> +     * which may have been left at NULL, for __initdata_cf_clobber to have as
> +     * much of an effect as possible.
> +     */
> +    if ( !IS_ENABLED(CONFIG_XEN_IBT) )

Shouldn't this better use cpu_has_xen_ibt?

Otherwise the clobbering done in _apply_alternatives() won't be
engaged, so it's pointless to set the extra fields.

> +        return;
> +
> +    /* Nothing at present. */
> +}
> +
>  void svm_vmexit_handler(void)
>  {
>      struct cpu_user_regs *regs = guest_cpu_user_regs();
> --- a/xen/arch/x86/hvm/vmx/vmx.c
> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> @@ -3032,6 +3032,30 @@ const struct hvm_function_table * __init
>      return &vmx_function_table;
>  }
>  
> +void __init prune_vmx(void)
> +{
> +    /*
> +     * Now that vmx_function_table was copied, populate all function pointers
> +     * which may have been left at NULL, for __initdata_cf_clobber to have as
> +     * much of an effect as possible.
> +     */
> +    if ( !IS_ENABLED(CONFIG_XEN_IBT) )
> +        return;
> +
> +    vmx_function_table.set_descriptor_access_exiting =
> +        vmx_set_descriptor_access_exiting;
> +
> +    vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
> +    vmx_function_table.process_isr            = vmx_process_isr;
> +    vmx_function_table.handle_eoi             = vmx_handle_eoi;
> +
> +    vmx_function_table.pi_update_irte = vmx_pi_update_irte;
> +
> +    vmx_function_table.deliver_posted_intr = vmx_deliver_posted_intr;
> +    vmx_function_table.sync_pir_to_irr     = vmx_sync_pir_to_irr;
> +    vmx_function_table.test_pir            = vmx_test_pir;

Hm, I find this quite fragile, as it's easy to add a new handler
without realizing that addition here might also be required.

I don't really have good ideas about how to handle this, unless we
populate unused handlers with some poison and loop over the structure
as an array of pointers and choke on finding one of them pointing to
NULL.

Thanks, Roger.
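
The poison-and-scan idea sketched in words above could look roughly like the following. This is purely illustrative - neither the poison value nor the helper exist in the tree, and (as pointed out in the reply below) non-pointer members of struct hvm_function_table would need special-casing before such a scan could actually be used:

    /* Hypothetical poison for hooks that are deliberately left unimplemented. */
    #define HVM_HOOK_UNIMPLEMENTED ((void *)(unsigned long)0xdeadbeef)

    static void __init check_hvm_hooks(const struct hvm_function_table *fns)
    {
        const unsigned long *slot = (const unsigned long *)fns;
        unsigned int i;

        /*
         * Vendor code would assign HVM_HOOK_UNIMPLEMENTED to hooks it knowingly
         * leaves out, so a slot still at NULL means a hook nobody thought about.
         */
        for ( i = 0; i < sizeof(*fns) / sizeof(*slot); i++ )
            BUG_ON(slot[i] == 0);
    }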
Jan Beulich Nov. 22, 2023, 10:42 a.m. UTC | #2
On 22.11.2023 11:08, Roger Pau Monné wrote:
> On Thu, Nov 16, 2023 at 02:33:14PM +0100, Jan Beulich wrote:
>> --- a/xen/arch/x86/hvm/svm/svm.c
>> +++ b/xen/arch/x86/hvm/svm/svm.c
>> @@ -2587,6 +2587,19 @@ const struct hvm_function_table * __init
>>      return &svm_function_table;
>>  }
>>  
>> +void __init prune_svm(void)
>> +{
>> +    /*
>> +     * Now that svm_function_table was copied, populate all function pointers
>> +     * which may have been left at NULL, for __initdata_cf_clobber to have as
>> +     * much of an effect as possible.
>> +     */
>> +    if ( !IS_ENABLED(CONFIG_XEN_IBT) )
> 
> Shouldn't this better use cpu_has_xen_ibt?
> 
> Otherwise the clobbering done in _apply_alternatives() won't be
> engaged, so it's pointless to set the extra fields.

That's better answered in the context of ...

>> --- a/xen/arch/x86/hvm/vmx/vmx.c
>> +++ b/xen/arch/x86/hvm/vmx/vmx.c
>> @@ -3032,6 +3032,30 @@ const struct hvm_function_table * __init
>>      return &vmx_function_table;
>>  }
>>  
>> +void __init prune_vmx(void)
>> +{
>> +    /*
>> +     * Now that vmx_function_table was copied, populate all function pointers
>> +     * which may have been left at NULL, for __initdata_cf_clobber to have as
>> +     * much of an effect as possible.
>> +     */
>> +    if ( !IS_ENABLED(CONFIG_XEN_IBT) )
>> +        return;
>> +
>> +    vmx_function_table.set_descriptor_access_exiting =
>> +        vmx_set_descriptor_access_exiting;
>> +
>> +    vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
>> +    vmx_function_table.process_isr            = vmx_process_isr;
>> +    vmx_function_table.handle_eoi             = vmx_handle_eoi;
>> +
>> +    vmx_function_table.pi_update_irte = vmx_pi_update_irte;
>> +
>> +    vmx_function_table.deliver_posted_intr = vmx_deliver_posted_intr;
>> +    vmx_function_table.sync_pir_to_irr     = vmx_sync_pir_to_irr;
>> +    vmx_function_table.test_pir            = vmx_test_pir;

... this: The goal of having a compile time conditional was to have the
compiler eliminate the code when not needed. Otherwise there's no real
reason to have a conditional there in the first place - we can as well
always install all these pointers.

> Hm, I find this quite fragile, as it's easy to add a new handler
> without realizing that addition here might also be required.

Indeed, but that's not the end of the world (as much as so far it
wasn't deemed necessary at all to try and also purge unused hooks'
ENDBR).

> I don't really have good ideas about how to handle this, unless we
> populate unused handlers with some poison and loop over the structure
> as an array of pointers and choke on finding one of them pointing to
> NULL.

The looping over the resulting section is already somewhat fragile,
when considering other non-pointer data in there. A specific poison
value would further increase the risk of mistaking a value for what
it doesn't represent, even if just a tiny bit.

Populating unused handlers with some poison value would also have the
same problem of being easy to forget when adding a new hook.

Jan
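
To make the dead-code-elimination argument concrete: IS_ENABLED(CONFIG_XEN_IBT) expands to a compile-time constant, so with the option off the early return folds away and none of the pointer stores are emitted at all, whereas a runtime predicate such as cpu_has_xen_ibt keeps the (init-only) stores in the binary and merely skips them on hardware without IBT. Illustration only, not code from the patch:

    if ( !IS_ENABLED(CONFIG_XEN_IBT) )   /* constant: everything below vanishes at compile time */
        return;

    if ( !cpu_has_xen_ibt )              /* runtime: stores stay in .init.text, just skipped */
        return;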
Roger Pau Monné Nov. 22, 2023, 12:01 p.m. UTC | #3
On Wed, Nov 22, 2023 at 11:42:16AM +0100, Jan Beulich wrote:
> On 22.11.2023 11:08, Roger Pau Monné wrote:
> > On Thu, Nov 16, 2023 at 02:33:14PM +0100, Jan Beulich wrote:
> >> --- a/xen/arch/x86/hvm/svm/svm.c
> >> +++ b/xen/arch/x86/hvm/svm/svm.c
> >> @@ -2587,6 +2587,19 @@ const struct hvm_function_table * __init
> >>      return &svm_function_table;
> >>  }
> >>  
> >> +void __init prune_svm(void)
> >> +{
> >> +    /*
> >> +     * Now that svm_function_table was copied, populate all function pointers
> >> +     * which may have been left at NULL, for __initdata_cf_clobber to have as
> >> +     * much of an effect as possible.
> >> +     */
> >> +    if ( !IS_ENABLED(CONFIG_XEN_IBT) )
> > 
> > Shouldn't this better use cpu_has_xen_ibt?
> > 
> > Otherwise the clobbering done in _apply_alternatives() won't be
> > engaged, so it's pointless to set the extra fields.
> 
> That's better answered in the context of ...
> 
> >> --- a/xen/arch/x86/hvm/vmx/vmx.c
> >> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> >> @@ -3032,6 +3032,30 @@ const struct hvm_function_table * __init
> >>      return &vmx_function_table;
> >>  }
> >>  
> >> +void __init prune_vmx(void)
> >> +{
> >> +    /*
> >> +     * Now that vmx_function_table was copied, populate all function pointers
> >> +     * which may have been left at NULL, for __initdata_cf_clobber to have as
> >> +     * much of an effect as possible.
> >> +     */
> >> +    if ( !IS_ENABLED(CONFIG_XEN_IBT) )
> >> +        return;
> >> +
> >> +    vmx_function_table.set_descriptor_access_exiting =
> >> +        vmx_set_descriptor_access_exiting;
> >> +
> >> +    vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
> >> +    vmx_function_table.process_isr            = vmx_process_isr;
> >> +    vmx_function_table.handle_eoi             = vmx_handle_eoi;
> >> +
> >> +    vmx_function_table.pi_update_irte = vmx_pi_update_irte;
> >> +
> >> +    vmx_function_table.deliver_posted_intr = vmx_deliver_posted_intr;
> >> +    vmx_function_table.sync_pir_to_irr     = vmx_sync_pir_to_irr;
> >> +    vmx_function_table.test_pir            = vmx_test_pir;
> 
> ... this: The goal of having a compile time conditional was to have the
> compiler eliminate the code when not needed. Otherwise there's no real
> reason to have a conditional there in the first place - we can as well
> always install all these pointers.

Maybe do:

if ( !IS_ENABLED(CONFIG_XEN_IBT) || !cpu_has_xen_ibt )

then?

Thanks, Roger.
Jan Beulich Nov. 22, 2023, 12:11 p.m. UTC | #4
On 22.11.2023 13:01, Roger Pau Monné wrote:
> On Wed, Nov 22, 2023 at 11:42:16AM +0100, Jan Beulich wrote:
>> On 22.11.2023 11:08, Roger Pau Monné wrote:
>>> On Thu, Nov 16, 2023 at 02:33:14PM +0100, Jan Beulich wrote:
>>>> --- a/xen/arch/x86/hvm/svm/svm.c
>>>> +++ b/xen/arch/x86/hvm/svm/svm.c
>>>> @@ -2587,6 +2587,19 @@ const struct hvm_function_table * __init
>>>>      return &svm_function_table;
>>>>  }
>>>>  
>>>> +void __init prune_svm(void)
>>>> +{
>>>> +    /*
>>>> +     * Now that svm_function_table was copied, populate all function pointers
>>>> +     * which may have been left at NULL, for __initdata_cf_clobber to have as
>>>> +     * much of an effect as possible.
>>>> +     */
>>>> +    if ( !IS_ENABLED(CONFIG_XEN_IBT) )
>>>
>>> Shouldn't this better use cpu_has_xen_ibt?
>>>
>>> Otherwise the clobbering done in _apply_alternatives() won't be
>>> engaged, so it's pointless to set the extra fields.
>>
>> That's better answered in the context of ...
>>
>>>> --- a/xen/arch/x86/hvm/vmx/vmx.c
>>>> +++ b/xen/arch/x86/hvm/vmx/vmx.c
>>>> @@ -3032,6 +3032,30 @@ const struct hvm_function_table * __init
>>>>      return &vmx_function_table;
>>>>  }
>>>>  
>>>> +void __init prune_vmx(void)
>>>> +{
>>>> +    /*
>>>> +     * Now that vmx_function_table was copied, populate all function pointers
>>>> +     * which may have been left at NULL, for __initdata_cf_clobber to have as
>>>> +     * much of an effect as possible.
>>>> +     */
>>>> +    if ( !IS_ENABLED(CONFIG_XEN_IBT) )
>>>> +        return;
>>>> +
>>>> +    vmx_function_table.set_descriptor_access_exiting =
>>>> +        vmx_set_descriptor_access_exiting;
>>>> +
>>>> +    vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
>>>> +    vmx_function_table.process_isr            = vmx_process_isr;
>>>> +    vmx_function_table.handle_eoi             = vmx_handle_eoi;
>>>> +
>>>> +    vmx_function_table.pi_update_irte = vmx_pi_update_irte;
>>>> +
>>>> +    vmx_function_table.deliver_posted_intr = vmx_deliver_posted_intr;
>>>> +    vmx_function_table.sync_pir_to_irr     = vmx_sync_pir_to_irr;
>>>> +    vmx_function_table.test_pir            = vmx_test_pir;
>>
>> ... this: The goal of having a compile time conditional was to have the
>> compiler eliminate the code when not needed. Otherwise there's no real
>> reason to have a conditional there in the first place - we can as well
>> always install all these pointers.
> 
> Maybe do:
> 
> if ( !IS_ENABLED(CONFIG_XEN_IBT) || !cpu_has_xen_ibt )
> 
> then?

Maybe. Yet then perhaps cpu_has_xen_ibt might better include the build-time
check already?

Jan
Roger Pau Monné Nov. 22, 2023, 1:41 p.m. UTC | #5
On Wed, Nov 22, 2023 at 01:11:36PM +0100, Jan Beulich wrote:
> On 22.11.2023 13:01, Roger Pau Monné wrote:
> > On Wed, Nov 22, 2023 at 11:42:16AM +0100, Jan Beulich wrote:
> >> On 22.11.2023 11:08, Roger Pau Monné wrote:
> >>> On Thu, Nov 16, 2023 at 02:33:14PM +0100, Jan Beulich wrote:
> >>>> --- a/xen/arch/x86/hvm/svm/svm.c
> >>>> +++ b/xen/arch/x86/hvm/svm/svm.c
> >>>> @@ -2587,6 +2587,19 @@ const struct hvm_function_table * __init
> >>>>      return &svm_function_table;
> >>>>  }
> >>>>  
> >>>> +void __init prune_svm(void)
> >>>> +{
> >>>> +    /*
> >>>> +     * Now that svm_function_table was copied, populate all function pointers
> >>>> +     * which may have been left at NULL, for __initdata_cf_clobber to have as
> >>>> +     * much of an effect as possible.
> >>>> +     */
> >>>> +    if ( !IS_ENABLED(CONFIG_XEN_IBT) )
> >>>
> >>> Shouldn't this better use cpu_has_xen_ibt?
> >>>
> >>> Otherwise the clobbering done in _apply_alternatives() won't be
> >>> engaged, so it's pointless to set the extra fields.
> >>
> >> That's better answered in the context of ...
> >>
> >>>> --- a/xen/arch/x86/hvm/vmx/vmx.c
> >>>> +++ b/xen/arch/x86/hvm/vmx/vmx.c
> >>>> @@ -3032,6 +3032,30 @@ const struct hvm_function_table * __init
> >>>>      return &vmx_function_table;
> >>>>  }
> >>>>  
> >>>> +void __init prune_vmx(void)
> >>>> +{
> >>>> +    /*
> >>>> +     * Now that vmx_function_table was copied, populate all function pointers
> >>>> +     * which may have been left at NULL, for __initdata_cf_clobber to have as
> >>>> +     * much of an effect as possible.
> >>>> +     */
> >>>> +    if ( !IS_ENABLED(CONFIG_XEN_IBT) )
> >>>> +        return;
> >>>> +
> >>>> +    vmx_function_table.set_descriptor_access_exiting =
> >>>> +        vmx_set_descriptor_access_exiting;
> >>>> +
> >>>> +    vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
> >>>> +    vmx_function_table.process_isr            = vmx_process_isr;
> >>>> +    vmx_function_table.handle_eoi             = vmx_handle_eoi;
> >>>> +
> >>>> +    vmx_function_table.pi_update_irte = vmx_pi_update_irte;
> >>>> +
> >>>> +    vmx_function_table.deliver_posted_intr = vmx_deliver_posted_intr;
> >>>> +    vmx_function_table.sync_pir_to_irr     = vmx_sync_pir_to_irr;
> >>>> +    vmx_function_table.test_pir            = vmx_test_pir;
> >>
> >> ... this: The goal of having a compile time conditional was to have the
> >> compiler eliminate the code when not needed. Otherwise there's no real
> >> reason to have a conditional there in the first place - we can as well
> >> always install all these pointers.
> > 
> > Maybe do:
> > 
> > if ( !IS_ENABLED(CONFIG_XEN_IBT) || !cpu_has_xen_ibt )
> > 
> > then?
> 
> Maybe. Yet then perhaps cpu_has_xen_ibt might better include the build-time
> check already?

I was wondering about this, yes, might be a better route.

Thanks, Roger.
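
Jan's closing thought - folding the build-time check into the predicate itself - might look roughly like this in asm/cpufeature.h (a sketch only; whether the synthetic feature is spelled X86_FEATURE_XEN_IBT is an assumption, and the existing definition may already differ):

    /* Sketch, not a verbatim quote of the header. */
    #define cpu_has_xen_ibt (IS_ENABLED(CONFIG_XEN_IBT) && \
                             boot_cpu_has(X86_FEATURE_XEN_IBT))

With such a definition, prune_vmx()/prune_svm() could test just cpu_has_xen_ibt: the IS_ENABLED() half still lets the compiler drop the assignments entirely when CONFIG_XEN_IBT is off, while the runtime half avoids pointlessly populating the extra hooks on hardware without IBT.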

Patch

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -161,10 +161,15 @@  static int __init cf_check hvm_enable(vo
     else if ( cpu_has_svm )
         fns = start_svm();
 
+    if ( fns )
+        hvm_funcs = *fns;
+
+    prune_vmx();
+    prune_svm();
+
     if ( fns == NULL )
         return 0;
 
-    hvm_funcs = *fns;
     hvm_enabled = 1;
 
     printk("HVM: %s enabled\n", fns->name);
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -2587,6 +2587,19 @@  const struct hvm_function_table * __init
     return &svm_function_table;
 }
 
+void __init prune_svm(void)
+{
+    /*
+     * Now that svm_function_table was copied, populate all function pointers
+     * which may have been left at NULL, for __initdata_cf_clobber to have as
+     * much of an effect as possible.
+     */
+    if ( !IS_ENABLED(CONFIG_XEN_IBT) )
+        return;
+
+    /* Nothing at present. */
+}
+
 void svm_vmexit_handler(void)
 {
     struct cpu_user_regs *regs = guest_cpu_user_regs();
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -3032,6 +3032,30 @@  const struct hvm_function_table * __init
     return &vmx_function_table;
 }
 
+void __init prune_vmx(void)
+{
+    /*
+     * Now that vmx_function_table was copied, populate all function pointers
+     * which may have been left at NULL, for __initdata_cf_clobber to have as
+     * much of an effect as possible.
+     */
+    if ( !IS_ENABLED(CONFIG_XEN_IBT) )
+        return;
+
+    vmx_function_table.set_descriptor_access_exiting =
+        vmx_set_descriptor_access_exiting;
+
+    vmx_function_table.update_eoi_exit_bitmap = vmx_update_eoi_exit_bitmap;
+    vmx_function_table.process_isr            = vmx_process_isr;
+    vmx_function_table.handle_eoi             = vmx_handle_eoi;
+
+    vmx_function_table.pi_update_irte = vmx_pi_update_irte;
+
+    vmx_function_table.deliver_posted_intr = vmx_deliver_posted_intr;
+    vmx_function_table.sync_pir_to_irr     = vmx_sync_pir_to_irr;
+    vmx_function_table.test_pir            = vmx_test_pir;
+}
+
 /*
  * Not all cases receive valid value in the VM-exit instruction length field.
  * Callers must know what they're doing!
--- a/xen/arch/x86/include/asm/hvm/hvm.h
+++ b/xen/arch/x86/include/asm/hvm/hvm.h
@@ -250,6 +250,9 @@  extern s8 hvm_port80_allowed;
 extern const struct hvm_function_table *start_svm(void);
 extern const struct hvm_function_table *start_vmx(void);
 
+void prune_svm(void);
+void prune_vmx(void);
+
 int hvm_domain_initialise(struct domain *d,
                           const struct xen_domctl_createdomain *config);
 void hvm_domain_relinquish_resources(struct domain *d);