[RFC,V3,13/16] x86/sev: Add Check of #HV event in path

Message ID 20230122024607.788454-14-ltykernel@gmail.com (mailing list archive)
State New, archived
Series x86/hyperv/sev: Add AMD sev-snp enlightened guest support on hyperv

Commit Message

Tianyu Lan Jan. 22, 2023, 2:46 a.m. UTC
From: Tianyu Lan <tiala@microsoft.com>

Add check_hv_pending() and check_hv_pending_irq_enable() to check
for #HV events that were queued while IRQs were disabled.

Signed-off-by: Tianyu Lan <tiala@microsoft.com>
---
 arch/x86/entry/entry_64.S       | 18 +++++++++++++++
 arch/x86/include/asm/irqflags.h | 10 +++++++++
 arch/x86/kernel/sev.c           | 39 +++++++++++++++++++++++++++++++++
 3 files changed, 67 insertions(+)

Comments

Gupta, Pankaj March 1, 2023, 11:11 a.m. UTC | #1
On 1/22/2023 3:46 AM, Tianyu Lan wrote:
> From: Tianyu Lan <tiala@microsoft.com>
> 
> Add check_hv_pending() and check_hv_pending_after_irq() to
> check queued #HV event when irq is disabled.
> 
> Signed-off-by: Tianyu Lan <tiala@microsoft.com>
> ---
>   arch/x86/entry/entry_64.S       | 18 +++++++++++++++
>   arch/x86/include/asm/irqflags.h | 10 +++++++++
>   arch/x86/kernel/sev.c           | 39 +++++++++++++++++++++++++++++++++
>   3 files changed, 67 insertions(+)
> 
> diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
> index 6baec7653f19..aec8dc4443d1 100644
> --- a/arch/x86/entry/entry_64.S
> +++ b/arch/x86/entry/entry_64.S
> @@ -1064,6 +1064,15 @@ SYM_CODE_END(paranoid_entry)
>    * R15 - old SPEC_CTRL
>    */
>   SYM_CODE_START_LOCAL(paranoid_exit)
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +	/*
> +	 * If a #HV was delivered during execution and interrupts were
> +	 * disabled, then check if it can be handled before the iret
> +	 * (which may re-enable interrupts).
> +	 */
> +	mov     %rsp, %rdi
> +	call    check_hv_pending
> +#endif
>   	UNWIND_HINT_REGS
>   
>   	/*
> @@ -1188,6 +1197,15 @@ SYM_CODE_START(error_entry)
>   SYM_CODE_END(error_entry)
>   
>   SYM_CODE_START_LOCAL(error_return)
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +	/*
> +	 * If a #HV was delivered during execution and interrupts were
> +	 * disabled, then check if it can be handled before the iret
> +	 * (which may re-enable interrupts).
> +	 */
> +	mov     %rsp, %rdi
> +	call    check_hv_pending
> +#endif
>   	UNWIND_HINT_REGS
>   	DEBUG_ENTRY_ASSERT_IRQS_OFF
>   	testb	$3, CS(%rsp)
> diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
> index 7793e52d6237..fe46e59168dd 100644
> --- a/arch/x86/include/asm/irqflags.h
> +++ b/arch/x86/include/asm/irqflags.h
> @@ -14,6 +14,10 @@
>   /*
>    * Interrupt control:
>    */
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +void check_hv_pending(struct pt_regs *regs);
> +void check_hv_pending_irq_enable(void);
> +#endif
>   
>   /* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
>   extern inline unsigned long native_save_fl(void);
> @@ -43,12 +47,18 @@ static __always_inline void native_irq_disable(void)
>   static __always_inline void native_irq_enable(void)
>   {
>   	asm volatile("sti": : :"memory");
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +	check_hv_pending_irq_enable();
> +#endif
>   }
>   
>   static inline __cpuidle void native_safe_halt(void)
>   {
>   	mds_idle_clear_cpu_buffers();
>   	asm volatile("sti; hlt": : :"memory");
> +#ifdef CONFIG_AMD_MEM_ENCRYPT
> +	check_hv_pending_irq_enable();
> +#endif
>   }
>   
>   static inline __cpuidle void native_halt(void)
> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
> index a8862a2eff67..fe5e5e41433d 100644
> --- a/arch/x86/kernel/sev.c
> +++ b/arch/x86/kernel/sev.c
> @@ -179,6 +179,45 @@ void noinstr __sev_es_ist_enter(struct pt_regs *regs)
>   	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
>   }
>   
> +static void do_exc_hv(struct pt_regs *regs)
> +{
> +	/* Handle #HV exception. */
> +}
> +
> +void check_hv_pending(struct pt_regs *regs)
> +{
> +	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
> +		return;
> +
> +	if ((regs->flags & X86_EFLAGS_IF) == 0)
> +		return;

Will this return prevent the guest from handling NMIs
while IRQs are disabled?

Thanks,
Pankaj

> +
> +	do_exc_hv(regs);
> +}
> +
> +void check_hv_pending_irq_enable(void)
> +{
> +	unsigned long flags;
> +	struct pt_regs regs;
> +
> +	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
> +		return;
> +
> +	memset(&regs, 0, sizeof(struct pt_regs));
> +	asm volatile("movl %%cs, %%eax;" : "=a" (regs.cs));
> +	asm volatile("movl %%ss, %%eax;" : "=a" (regs.ss));
> +	regs.orig_ax = 0xffffffff;
> +	regs.flags = native_save_fl();
> +
> +	/*
> +	 * Disable irq when handle pending #HV events after
> +	 * re-enabling irq.
> +	 */
> +	asm volatile("cli" : : : "memory");
> +	do_exc_hv(&regs);
> +	asm volatile("sti" : : : "memory");
> +}
> +
>   void noinstr __sev_es_ist_exit(void)
>   {
>   	unsigned long ist;
Gupta, Pankaj March 8, 2023, 4:18 p.m. UTC | #2
On 3/1/2023 12:11 PM, Gupta, Pankaj wrote:
> On 1/22/2023 3:46 AM, Tianyu Lan wrote:
>> From: Tianyu Lan <tiala@microsoft.com>
>>
>> Add check_hv_pending() and check_hv_pending_after_irq() to
>> check queued #HV event when irq is disabled.
>>
>> Signed-off-by: Tianyu Lan <tiala@microsoft.com>
>> ---
>>   arch/x86/entry/entry_64.S       | 18 +++++++++++++++
>>   arch/x86/include/asm/irqflags.h | 10 +++++++++
>>   arch/x86/kernel/sev.c           | 39 +++++++++++++++++++++++++++++++++
>>   3 files changed, 67 insertions(+)
>>
>> diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
>> index 6baec7653f19..aec8dc4443d1 100644
>> --- a/arch/x86/entry/entry_64.S
>> +++ b/arch/x86/entry/entry_64.S
>> @@ -1064,6 +1064,15 @@ SYM_CODE_END(paranoid_entry)
>>    * R15 - old SPEC_CTRL
>>    */
>>   SYM_CODE_START_LOCAL(paranoid_exit)
>> +#ifdef CONFIG_AMD_MEM_ENCRYPT
>> +    /*
>> +     * If a #HV was delivered during execution and interrupts were
>> +     * disabled, then check if it can be handled before the iret
>> +     * (which may re-enable interrupts).
>> +     */
>> +    mov     %rsp, %rdi
>> +    call    check_hv_pending
>> +#endif
>>       UNWIND_HINT_REGS
>>       /*
>> @@ -1188,6 +1197,15 @@ SYM_CODE_START(error_entry)
>>   SYM_CODE_END(error_entry)
>>   SYM_CODE_START_LOCAL(error_return)
>> +#ifdef CONFIG_AMD_MEM_ENCRYPT
>> +    /*
>> +     * If a #HV was delivered during execution and interrupts were
>> +     * disabled, then check if it can be handled before the iret
>> +     * (which may re-enable interrupts).
>> +     */
>> +    mov     %rsp, %rdi
>> +    call    check_hv_pending
>> +#endif
>>       UNWIND_HINT_REGS
>>       DEBUG_ENTRY_ASSERT_IRQS_OFF
>>       testb    $3, CS(%rsp)
>> diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
>> index 7793e52d6237..fe46e59168dd 100644
>> --- a/arch/x86/include/asm/irqflags.h
>> +++ b/arch/x86/include/asm/irqflags.h
>> @@ -14,6 +14,10 @@
>>   /*
>>    * Interrupt control:
>>    */
>> +#ifdef CONFIG_AMD_MEM_ENCRYPT
>> +void check_hv_pending(struct pt_regs *regs);
>> +void check_hv_pending_irq_enable(void);
>> +#endif
>>   /* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
>>   extern inline unsigned long native_save_fl(void);
>> @@ -43,12 +47,18 @@ static __always_inline void native_irq_disable(void)
>>   static __always_inline void native_irq_enable(void)
>>   {
>>       asm volatile("sti": : :"memory");
>> +#ifdef CONFIG_AMD_MEM_ENCRYPT
>> +    check_hv_pending_irq_enable();
>> +#endif
>>   }
>>   static inline __cpuidle void native_safe_halt(void)
>>   {
>>       mds_idle_clear_cpu_buffers();
>>       asm volatile("sti; hlt": : :"memory");
>> +#ifdef CONFIG_AMD_MEM_ENCRYPT
>> +    check_hv_pending_irq_enable();
>> +#endif
>>   }
>>   static inline __cpuidle void native_halt(void)
>> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
>> index a8862a2eff67..fe5e5e41433d 100644
>> --- a/arch/x86/kernel/sev.c
>> +++ b/arch/x86/kernel/sev.c
>> @@ -179,6 +179,45 @@ void noinstr __sev_es_ist_enter(struct pt_regs *regs)
>>       this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
>>   }
>> +static void do_exc_hv(struct pt_regs *regs)
>> +{
>> +    /* Handle #HV exception. */
>> +}
>> +
>> +void check_hv_pending(struct pt_regs *regs)
>> +{
>> +    if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
>> +        return;
>> +
>> +    if ((regs->flags & X86_EFLAGS_IF) == 0)
>> +        return;
> 
> Will this return and prevent guest from executing NMI's
> while irqs are disabled?

I think we need to handle NMIs even when IRQs are disabled.

Since we reset "no_further_signal" in hv_raw_handle_exception()
and then return from check_hv_pending() when IRQs are disabled,
couldn't this result in the loss or delay of an NMI event?

Thanks,
Pankaj
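
To illustrate the concern raised above, a minimal sketch (not part of the
posted patch) of how check_hv_pending() could dispatch NMI-class doorbell
events before the EFLAGS.IF test, so that NMIs are not deferred while IRQs
are disabled. hv_nmi_pending() is an assumed helper, not an API defined in
this series; it stands in for a check of the per-CPU #HV doorbell page.

static bool hv_nmi_pending(void)
{
	/*
	 * Assumed helper: would inspect the per-CPU #HV doorbell page
	 * for a pending NMI-class event. Stubbed out in this sketch.
	 */
	return false;
}

void check_hv_pending(struct pt_regs *regs)
{
	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	/*
	 * Dispatch NMI-class events first: they must not be gated on
	 * EFLAGS.IF, otherwise an NMI queued via the #HV doorbell page
	 * is lost or delayed while IRQs are disabled.
	 */
	if (hv_nmi_pending())
		do_exc_hv(regs);

	/* Maskable events stay deferred until IRQs are re-enabled. */
	if (!(regs->flags & X86_EFLAGS_IF))
		return;

	do_exc_hv(regs);
}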
Tianyu Lan March 10, 2023, 3:59 p.m. UTC | #3
On 3/9/2023 12:18 AM, Gupta, Pankaj wrote:
> On 3/1/2023 12:11 PM, Gupta, Pankaj wrote:
>> On 1/22/2023 3:46 AM, Tianyu Lan wrote:

>>> diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
>>> index a8862a2eff67..fe5e5e41433d 100644
>>> --- a/arch/x86/kernel/sev.c
>>> +++ b/arch/x86/kernel/sev.c
>>> @@ -179,6 +179,45 @@ void noinstr __sev_es_ist_enter(struct pt_regs *regs)
>>>       this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
>>>   }
>>> +static void do_exc_hv(struct pt_regs *regs)
>>> +{
>>> +    /* Handle #HV exception. */
>>> +}
>>> +
>>> +void check_hv_pending(struct pt_regs *regs)
>>> +{
>>> +    if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
>>> +        return;
>>> +
>>> +    if ((regs->flags & X86_EFLAGS_IF) == 0)
>>> +        return;
>>
>> Will this return and prevent guest from executing NMI's
>> while irqs are disabled?
> 
> I think we need to handle NMI's even when irqs are disabled.
> 

Yes, nice catch!

> As we reset "no_further_signal" in hv_raw_handle_exception()
> and return from check_hv_pending() when irqs are disabled, this
> can result in loss/delay of NMI event?

Will fix this in the next version.

Thanks.
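
As a rough sketch only, and not the actual follow-up change, a next version
could also tidy check_hv_pending_irq_enable(): use the otherwise-unused
local to snapshot the caller's RFLAGS, and restore that state instead of
executing an unconditional STI. The raw cli/sti asm is kept so the helper
does not recurse through native_irq_enable().

void check_hv_pending_irq_enable(void)
{
	unsigned long flags;
	struct pt_regs regs;

	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
		return;

	memset(&regs, 0, sizeof(struct pt_regs));
	asm volatile("movl %%cs, %%eax;" : "=a" (regs.cs));
	asm volatile("movl %%ss, %%eax;" : "=a" (regs.ss));
	regs.orig_ax = 0xffffffff;

	/* Snapshot the caller's RFLAGS before masking interrupts. */
	flags = native_save_fl();
	regs.flags = flags;

	/* Handle pending #HV events with IRQs masked. */
	asm volatile("cli" : : : "memory");
	do_exc_hv(&regs);

	/* Restore the caller's interrupt state rather than forcing STI. */
	if (flags & X86_EFLAGS_IF)
		asm volatile("sti" : : : "memory");
}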

Patch

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 6baec7653f19..aec8dc4443d1 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1064,6 +1064,15 @@  SYM_CODE_END(paranoid_entry)
  * R15 - old SPEC_CTRL
  */
 SYM_CODE_START_LOCAL(paranoid_exit)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	/*
+	 * If a #HV was delivered during execution and interrupts were
+	 * disabled, then check if it can be handled before the iret
+	 * (which may re-enable interrupts).
+	 */
+	mov     %rsp, %rdi
+	call    check_hv_pending
+#endif
 	UNWIND_HINT_REGS
 
 	/*
@@ -1188,6 +1197,15 @@  SYM_CODE_START(error_entry)
 SYM_CODE_END(error_entry)
 
 SYM_CODE_START_LOCAL(error_return)
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	/*
+	 * If a #HV was delivered during execution and interrupts were
+	 * disabled, then check if it can be handled before the iret
+	 * (which may re-enable interrupts).
+	 */
+	mov     %rsp, %rdi
+	call    check_hv_pending
+#endif
 	UNWIND_HINT_REGS
 	DEBUG_ENTRY_ASSERT_IRQS_OFF
 	testb	$3, CS(%rsp)
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 7793e52d6237..fe46e59168dd 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -14,6 +14,10 @@ 
 /*
  * Interrupt control:
  */
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+void check_hv_pending(struct pt_regs *regs);
+void check_hv_pending_irq_enable(void);
+#endif
 
 /* Declaration required for gcc < 4.9 to prevent -Werror=missing-prototypes */
 extern inline unsigned long native_save_fl(void);
@@ -43,12 +47,18 @@  static __always_inline void native_irq_disable(void)
 static __always_inline void native_irq_enable(void)
 {
 	asm volatile("sti": : :"memory");
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	check_hv_pending_irq_enable();
+#endif
 }
 
 static inline __cpuidle void native_safe_halt(void)
 {
 	mds_idle_clear_cpu_buffers();
 	asm volatile("sti; hlt": : :"memory");
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+	check_hv_pending_irq_enable();
+#endif
 }
 
 static inline __cpuidle void native_halt(void)
diff --git a/arch/x86/kernel/sev.c b/arch/x86/kernel/sev.c
index a8862a2eff67..fe5e5e41433d 100644
--- a/arch/x86/kernel/sev.c
+++ b/arch/x86/kernel/sev.c
@@ -179,6 +179,45 @@  void noinstr __sev_es_ist_enter(struct pt_regs *regs)
 	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
 }
 
+static void do_exc_hv(struct pt_regs *regs)
+{
+	/* Handle #HV exception. */
+}
+
+void check_hv_pending(struct pt_regs *regs)
+{
+	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		return;
+
+	if ((regs->flags & X86_EFLAGS_IF) == 0)
+		return;
+
+	do_exc_hv(regs);
+}
+
+void check_hv_pending_irq_enable(void)
+{
+	unsigned long flags;
+	struct pt_regs regs;
+
+	if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
+		return;
+
+	memset(&regs, 0, sizeof(struct pt_regs));
+	asm volatile("movl %%cs, %%eax;" : "=a" (regs.cs));
+	asm volatile("movl %%ss, %%eax;" : "=a" (regs.ss));
+	regs.orig_ax = 0xffffffff;
+	regs.flags = native_save_fl();
+
+	/*
+	 * Disable irq when handle pending #HV events after
+	 * re-enabling irq.
+	 */
+	asm volatile("cli" : : : "memory");
+	do_exc_hv(&regs);
+	asm volatile("sti" : : : "memory");
+}
+
 void noinstr __sev_es_ist_exit(void)
 {
 	unsigned long ist;