
[Part2,v6,47/49] *fix for stale per-cpu pointer due to cond_resched during ghcb mapping

Message ID 1dbc14735bbc336c171ef8fefd4aef39ddf95816.1655761627.git.ashish.kalra@amd.com (mailing list archive)
State Not Applicable
Delegated to: Herbert Xu
Series Add AMD Secure Nested Paging (SEV-SNP)

Commit Message

Kalra, Ashish June 20, 2022, 11:14 p.m. UTC
From: Michael Roth <michael.roth@amd.com>

Signed-off-by: Michael Roth <michael.roth@amd.com>
---
 arch/x86/kvm/svm/svm.c | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

Comments

Peter Gonda June 24, 2022, 4:35 p.m. UTC | #1
On Mon, Jun 20, 2022 at 5:15 PM Ashish Kalra <Ashish.Kalra@amd.com> wrote:
>
> From: Michael Roth <michael.roth@amd.com>
>
> Signed-off-by: Michael Roth <michael.roth@amd.com>

Can you add a commit description here? Is this a fix for existing
SEV-ES support, or should it be incorporated into the patch in this
series that introduces the issue?

Kalra, Ashish June 24, 2022, 4:44 p.m. UTC | #2

Hello Peter,
>>
>> From: Michael Roth <michael.roth@amd.com>
>>
>> Signed-off-by: Michael Roth <michael.roth@amd.com>

>Can you add a commit description here? Is this a fix for existing SEV-ES support, or should it be incorporated into the patch in this series that introduces the issue?

This actually fixes an issue caused by preemption in svm_prepare_switch_to_guest() when kvm_vcpu_map() is called to map in the GHCB before
entering the guest.
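
To make the failure mode concrete, here is a condensed sketch of the
affected path before the fix (illustrative only, not the exact upstream
code):

static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	/* vcpu->cpu is sampled here, before the GHCB is unmapped... */
	struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);

	/*
	 * ...but sev_es_unmap_ghcb() can cond_resched(), the task can
	 * be migrated, and vcpu->cpu can change underneath us...
	 */
	if (sev_es_guest(vcpu->kvm))
		sev_es_unmap_ghcb(svm);

	/*
	 * ...so by the time 'sd' is used to save host state below, it
	 * may point at the svm_data of a CPU we are no longer on.
	 */
}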

This is a temporary fix; what we really need is to avoid getting preempted at all once vcpu_enter_guest() has disabled preemption. We have
some ideas about using the gfn_to_pfn_cache() infrastructure to re-use the GHCB mapping that is already established at guest exit, so that
we can avoid calling kvm_vcpu_map() to re-map the GHCB.
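
As a rough sketch of that idea (hypothetical: the ghcb_cache field and
the kvm_gpc_*() helper names/signatures below are assumptions, and this
API has changed across kernel versions):

/*
 * Keep the GHCB mapped across exits via a gfn_to_pfn_cache instead of
 * doing a kvm_vcpu_map()/kvm_vcpu_unmap() pair around every entry.
 * 'ghcb_cache' would be a new field in struct vcpu_svm.
 */
static int svm_map_ghcb_cached(struct vcpu_svm *svm)
{
	struct gfn_to_pfn_cache *gpc = &svm->ghcb_cache;

	/* Fast path: the cached mapping is still valid, nothing sleeps. */
	if (kvm_gpc_check(gpc, PAGE_SIZE))
		return 0;

	/*
	 * Slow path: refreshing can sleep, so it must only happen from
	 * a context where rescheduling is still safe, i.e. before
	 * preemption is disabled for guest entry.
	 */
	return kvm_gpc_refresh(gpc, PAGE_SIZE);
}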

Thanks,
Ashish


Patch

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index fced6ea423ad..f78e3b1bde0e 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1352,7 +1352,7 @@ static void svm_vcpu_free(struct kvm_vcpu *vcpu)
 static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu);
+	struct svm_cpu_data *sd;
 
 	if (sev_es_guest(vcpu->kvm))
 		sev_es_unmap_ghcb(svm);
@@ -1360,6 +1360,10 @@ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
 	if (svm->guest_state_loaded)
 		return;
 
+	/* sev_es_unmap_ghcb() can resched, so grab per-cpu pointer afterward. */
+	barrier();
+	sd = per_cpu(svm_data, vcpu->cpu);
+
 	/*
 	 * Save additional host state that will be restored on VMEXIT (sev-es)
 	 * or subsequent vmload of host save area.