diff mbox series

target/i386: Remove LBREn bit check when access Arch LBR MSRs

Message ID 20220517155024.33270-1-weijiang.yang@intel.com (mailing list archive)
State New, archived
Headers show
Series target/i386: Remove LBREn bit check when access Arch LBR MSRs | expand

Commit Message

Yang, Weijiang May 17, 2022, 3:50 p.m. UTC
Live migration can happen when the Arch LBR LBREn bit is cleared,
e.g., when migration happens after the guest has entered SMM mode.
In this case, we still need to migrate the Arch LBR MSRs.

Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
---
 target/i386/kvm/kvm.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)


base-commit: 8eccdb9eb84615291faef1257d5779ebfef7a0d0

Comments

Paolo Bonzini May 17, 2022, 5:07 p.m. UTC | #1
On 5/17/22 17:50, Yang Weijiang wrote:
> Live migration can happen when Arch LBR LBREn bit is cleared,
> e.g., when migration happens after guest entered SMM mode.
> In this case, we still need to migrate Arch LBR MSRs.
> 
> Signed-off-by: Yang Weijiang <weijiang.yang@intel.com>
> ---
>   target/i386/kvm/kvm.c | 21 +++++++++------------
>   1 file changed, 9 insertions(+), 12 deletions(-)
> 
> diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
> index a9ee8eebd7..e2d675115b 100644
> --- a/target/i386/kvm/kvm.c
> +++ b/target/i386/kvm/kvm.c
> @@ -3373,15 +3373,14 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
>               int i, ret;
>   
>               /*
> -             * Only migrate Arch LBR states when: 1) Arch LBR is enabled
> -             * for migrated vcpu. 2) the host Arch LBR depth equals that
> -             * of source guest's, this is to avoid mismatch of guest/host
> -             * config for the msr hence avoid unexpected misbehavior.
> +             * Only migrate Arch LBR states when the host Arch LBR depth
> +             * equals that of source guest's, this is to avoid mismatch
> +             * of guest/host config for the msr hence avoid unexpected
> +             * misbehavior.
>                */
>               ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
>   
> -            if (ret == 1 && (env->msr_lbr_ctl & 0x1) && !!depth &&
> -                depth == env->msr_lbr_depth) {
> +            if (ret == 1 && !!depth && depth == env->msr_lbr_depth) {
>                   kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl);
>                   kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth);
>   
> @@ -3801,13 +3800,11 @@ static int kvm_get_msrs(X86CPU *cpu)
>   
>       if (kvm_enabled() && cpu->enable_pmu &&
>           (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
> -        uint64_t ctl, depth;
> -        int i, ret2;
> +        uint64_t depth;
> +        int i, ret;
>   
> -        ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_CTL, &ctl);
> -        ret2 = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
> -        if (ret == 1 && ret2 == 1 && (ctl & 0x1) &&
> -            depth == ARCH_LBR_NR_ENTRIES) {
> +        ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
> +        if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) {
>               kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
>               kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);
>   
> 
> base-commit: 8eccdb9eb84615291faef1257d5779ebfef7a0d0

Queued, thanks.

Paolo
diff mbox series

Patch

diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index a9ee8eebd7..e2d675115b 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -3373,15 +3373,14 @@  static int kvm_put_msrs(X86CPU *cpu, int level)
             int i, ret;
 
             /*
-             * Only migrate Arch LBR states when: 1) Arch LBR is enabled
-             * for migrated vcpu. 2) the host Arch LBR depth equals that
-             * of source guest's, this is to avoid mismatch of guest/host
-             * config for the msr hence avoid unexpected misbehavior.
+             * Only migrate Arch LBR states when the host Arch LBR depth
+             * equals that of source guest's, this is to avoid mismatch
+             * of guest/host config for the msr hence avoid unexpected
+             * misbehavior.
              */
             ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
 
-            if (ret == 1 && (env->msr_lbr_ctl & 0x1) && !!depth &&
-                depth == env->msr_lbr_depth) {
+            if (ret == 1 && !!depth && depth == env->msr_lbr_depth) {
                 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, env->msr_lbr_ctl);
                 kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, env->msr_lbr_depth);
 
@@ -3801,13 +3800,11 @@  static int kvm_get_msrs(X86CPU *cpu)
 
     if (kvm_enabled() && cpu->enable_pmu &&
         (env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_ARCH_LBR)) {
-        uint64_t ctl, depth;
-        int i, ret2;
+        uint64_t depth;
+        int i, ret;
 
-        ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_CTL, &ctl);
-        ret2 = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
-        if (ret == 1 && ret2 == 1 && (ctl & 0x1) &&
-            depth == ARCH_LBR_NR_ENTRIES) {
+        ret = kvm_get_one_msr(cpu, MSR_ARCH_LBR_DEPTH, &depth);
+        if (ret == 1 && depth == ARCH_LBR_NR_ENTRIES) {
             kvm_msr_entry_add(cpu, MSR_ARCH_LBR_CTL, 0);
             kvm_msr_entry_add(cpu, MSR_ARCH_LBR_DEPTH, 0);