
KVM: x86: Add support for save/load MSR_SMI_COUNT

Message ID 1519726932-13833-1-git-send-email-liran.alon@oracle.com (mailing list archive)
State New, archived

Commit Message

Liran Alon Feb. 27, 2018, 10:22 a.m. UTC
This MSR returns the number of #SMIs that have occurred on
the CPU since boot.

KVM commit 52797bf9a875 ("KVM: x86: Add emulation of MSR_SMI_COUNT")
introduced support for emulating this MSR.

This commit adds support for QEMU to save/load this
MSR for migration purposes.

Signed-off-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
---
 target/i386/cpu.c     |  1 +
 target/i386/cpu.h     |  3 +++
 target/i386/kvm.c     | 13 +++++++++++++
 target/i386/machine.c | 20 ++++++++++++++++++++
 4 files changed, 37 insertions(+)
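
For context, MSR_SMI_COUNT (0x34) is a read-only counter on real hardware, and the value migrated by this patch is what the guest sees when it reads that MSR. The following is a minimal sketch of inspecting the counter from a Linux guest through the msr driver; it assumes the msr module is loaded and the program runs as root, and it is purely illustrative, not part of the patch:

/* read_smi_count.c: read MSR_SMI_COUNT (0x34) for CPU 0 via /dev/cpu/0/msr */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    uint64_t count;
    int fd = open("/dev/cpu/0/msr", O_RDONLY);

    if (fd < 0) {
        perror("open /dev/cpu/0/msr");
        return 1;
    }
    /* The msr driver reads 8 bytes at the file offset equal to the MSR index. */
    if (pread(fd, &count, sizeof(count), 0x34) != sizeof(count)) {
        perror("read MSR_SMI_COUNT");
        close(fd);
        return 1;
    }
    printf("SMI count: %llu\n", (unsigned long long)count);
    close(fd);
    return 0;
}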

Comments

Dr. David Alan Gilbert July 24, 2018, 11:29 a.m. UTC | #1
* Liran Alon (liran.alon@oracle.com) wrote:
> This MSR returns the number of #SMIs that occurred on
> CPU since boot.
> 
> KVM commit 52797bf9a875 ("KVM: x86: Add emulation of MSR_SMI_COUNT")
> introduced support for emulating this MSR.
> 
> This commit adds support for QEMU to save/load this
> MSR for migration purposes.
> 
> Signed-off-by: Liran Alon <liran.alon@oracle.com>
> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>

Note this breaks 2.12->2.11 migration if SMM is enabled,
with a:

qemu-system-x86_64: error while loading state for instance 0x0 of device 'cpu'

Dave

> ---
>  target/i386/cpu.c     |  1 +
>  target/i386/cpu.h     |  3 +++
>  target/i386/kvm.c     | 13 +++++++++++++
>  target/i386/machine.c | 20 ++++++++++++++++++++
>  4 files changed, 37 insertions(+)
> 
> diff --git a/target/i386/cpu.c b/target/i386/cpu.c
> index b5e431e769da..ba9ec6a6116b 100644
> --- a/target/i386/cpu.c
> +++ b/target/i386/cpu.c
> @@ -3645,6 +3645,7 @@ static void x86_cpu_reset(CPUState *s)
>      cpu_x86_update_cr0(env, 0x60000010);
>      env->a20_mask = ~0x0;
>      env->smbase = 0x30000;
> +    env->msr_smi_count = 0;
>  
>      env->idt.limit = 0xffff;
>      env->gdt.limit = 0xffff;
> diff --git a/target/i386/cpu.h b/target/i386/cpu.h
> index faf39ec1ce77..254e557bb8fa 100644
> --- a/target/i386/cpu.h
> +++ b/target/i386/cpu.h
> @@ -1,3 +1,4 @@
> +
>  /*
>   * i386 virtual CPU header
>   *
> @@ -359,6 +360,7 @@ typedef enum X86Seg {
>  #define MSR_P6_PERFCTR0                 0xc1
>  
>  #define MSR_IA32_SMBASE                 0x9e
> +#define MSR_SMI_COUNT                   0x34
>  #define MSR_MTRRcap                     0xfe
>  #define MSR_MTRRcap_VCNT                8
>  #define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
> @@ -1123,6 +1125,7 @@ typedef struct CPUX86State {
>  
>      uint64_t pat;
>      uint32_t smbase;
> +    uint64_t msr_smi_count;
>  
>      uint32_t pkru;
>  
> diff --git a/target/i386/kvm.c b/target/i386/kvm.c
> index ad4b159b28af..a53735f266c5 100644
> --- a/target/i386/kvm.c
> +++ b/target/i386/kvm.c
> @@ -92,6 +92,7 @@ static bool has_msr_hv_stimer;
>  static bool has_msr_hv_frequencies;
>  static bool has_msr_xss;
>  static bool has_msr_spec_ctrl;
> +static bool has_msr_smi_count;
>  
>  static uint32_t has_architectural_pmu_version;
>  static uint32_t num_architectural_pmu_gp_counters;
> @@ -1124,6 +1125,9 @@ static int kvm_get_supported_msrs(KVMState *s)
>                  case MSR_IA32_SMBASE:
>                      has_msr_smbase = true;
>                      break;
> +                case MSR_SMI_COUNT:
> +                    has_msr_smi_count = true;
> +                    break;
>                  case MSR_IA32_MISC_ENABLE:
>                      has_msr_misc_enable = true;
>                      break;
> @@ -1633,6 +1637,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
>      if (has_msr_smbase) {
>          kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
>      }
> +    if (has_msr_smi_count) {
> +        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
> +    }
>      if (has_msr_bndcfgs) {
>          kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
>      }
> @@ -1979,6 +1986,9 @@ static int kvm_get_msrs(X86CPU *cpu)
>      if (has_msr_smbase) {
>          kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
>      }
> +    if (has_msr_smi_count) {
> +        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
> +    }
>      if (has_msr_feature_control) {
>          kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
>      }
> @@ -2205,6 +2215,9 @@ static int kvm_get_msrs(X86CPU *cpu)
>          case MSR_IA32_SMBASE:
>              env->smbase = msrs[i].data;
>              break;
> +        case MSR_SMI_COUNT:
> +            env->msr_smi_count = msrs[i].data;
> +            break;
>          case MSR_IA32_FEATURE_CONTROL:
>              env->msr_ia32_feature_control = msrs[i].data;
>              break;
> diff --git a/target/i386/machine.c b/target/i386/machine.c
> index 361c05aedfdc..9432496cbda8 100644
> --- a/target/i386/machine.c
> +++ b/target/i386/machine.c
> @@ -395,6 +395,25 @@ static const VMStateDescription vmstate_msr_tsc_adjust = {
>      }
>  };
>  
> +static bool msr_smi_count_needed(void *opaque)
> +{
> +    X86CPU *cpu = opaque;
> +    CPUX86State *env = &cpu->env;
> +
> +    return env->msr_smi_count != 0;
> +}
> +
> +static const VMStateDescription vmstate_msr_smi_count = {
> +    .name = "cpu/msr_smi_count",
> +    .version_id = 1,
> +    .minimum_version_id = 1,
> +    .needed = msr_smi_count_needed,
> +    .fields = (VMStateField[]) {
> +        VMSTATE_UINT64(env.msr_smi_count, X86CPU),
> +        VMSTATE_END_OF_LIST()
> +    }
> +};
> +
>  static bool tscdeadline_needed(void *opaque)
>  {
>      X86CPU *cpu = opaque;
> @@ -952,6 +971,7 @@ VMStateDescription vmstate_x86_cpu = {
>          &vmstate_avx512,
>          &vmstate_xss,
>          &vmstate_tsc_khz,
> +        &vmstate_msr_smi_count,
>  #ifdef TARGET_X86_64
>          &vmstate_pkru,
>  #endif
> -- 
> 1.9.1
> 
> 
--
Dr. David Alan Gilbert / dgilbert@redhat.com / Manchester, UK
Eduardo Habkost July 24, 2018, 2:39 p.m. UTC | #2
On Tue, Jul 24, 2018 at 12:29:12PM +0100, Dr. David Alan Gilbert wrote:
> * Liran Alon (liran.alon@oracle.com) wrote:
> > This MSR returns the number of #SMIs that occurred on
> > CPU since boot.
> > 
> > KVM commit 52797bf9a875 ("KVM: x86: Add emulation of MSR_SMI_COUNT")
> > introduced support for emulating this MSR.
> > 
> > This commit adds support for QEMU to save/load this
> > MSR for migration purposes.
> > 
> > Signed-off-by: Liran Alon <liran.alon@oracle.com>
> > Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> > Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> 
> Note this breaks 2.12->2.11 migration if SMM is enabled
> with a:
> 
> qemu-system-x86_64: error while loading state for instance 0x0 of device 'cpu'

Right, the MSR can't be enabled unconditionally.

It looks like there's no CPUID bit for reporting the MSR as
available?  How exactly would guests know if the MSR is really
safe to use?
Paolo Bonzini July 24, 2018, 2:40 p.m. UTC | #3
On 24/07/2018 16:39, Eduardo Habkost wrote:
> On Tue, Jul 24, 2018 at 12:29:12PM +0100, Dr. David Alan Gilbert wrote:
>> * Liran Alon (liran.alon@oracle.com) wrote:
>>> This MSR returns the number of #SMIs that occurred on
>>> CPU since boot.
>>>
>>> KVM commit 52797bf9a875 ("KVM: x86: Add emulation of MSR_SMI_COUNT")
>>> introduced support for emulating this MSR.
>>>
>>> This commit adds support for QEMU to save/load this
>>> MSR for migration purposes.
>>>
>>> Signed-off-by: Liran Alon <liran.alon@oracle.com>
>>> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
>>> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
>>
>> Note this breaks 2.12->2.11 migration if SMM is enabled
>> with a:
>>
>> qemu-system-x86_64: error while loading state for instance 0x0 of device 'cpu'
> 
> Right, the MSR can't be enabled unconditionally.
> 
> It looks like there's no CPUID bit for reporting the MSR as
> available?  How exactly would guests know if the MSR is really
> safe to use?

As far as we know, the only guest that uses it is ESX.  As with most other
MSRs, the guest should in general be ready for it to cause a #GP fault.

Paolo
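
In practice, a guest that wants this counter probes the MSR defensively, which is why a #GP on hosts that do not implement it is harmless. A minimal kernel-side sketch of that pattern, assuming Linux's rdmsrl_safe() helper (illustrative only, not code from this thread):

/* Probe MSR_SMI_COUNT; rdmsrl_safe() catches the #GP and returns non-zero
 * when the MSR is not implemented, so the caller can fall back gracefully. */
#include <linux/types.h>
#include <asm/msr.h>

#ifndef MSR_SMI_COUNT
#define MSR_SMI_COUNT 0x00000034
#endif

static u64 smi_count_or_zero(void)
{
    u64 count;

    if (rdmsrl_safe(MSR_SMI_COUNT, &count))
        return 0;   /* read faulted: treat the count as unavailable */
    return count;
}
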
Eduardo Habkost July 24, 2018, 2:48 p.m. UTC | #4
On Tue, Jul 24, 2018 at 04:40:15PM +0200, Paolo Bonzini wrote:
> On 24/07/2018 16:39, Eduardo Habkost wrote:
> > On Tue, Jul 24, 2018 at 12:29:12PM +0100, Dr. David Alan Gilbert wrote:
> >> * Liran Alon (liran.alon@oracle.com) wrote:
> >>> This MSR returns the number of #SMIs that occurred on
> >>> CPU since boot.
> >>>
> >>> KVM commit 52797bf9a875 ("KVM: x86: Add emulation of MSR_SMI_COUNT")
> >>> introduced support for emulating this MSR.
> >>>
> >>> This commit adds support for QEMU to save/load this
> >>> MSR for migration purposes.
> >>>
> >>> Signed-off-by: Liran Alon <liran.alon@oracle.com>
> >>> Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> >>> Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
> >>
> >> Note this breaks 2.12->2.11 migration if SMM is enabled
> >> with a:
> >>
> >> qemu-system-x86_64: error while loading state for instance 0x0 of device 'cpu'
> > 
> > Right, the MSR can't be enabled unconditionally.
> > 
> > It looks like there's no CPUID bit for reporting the MSR as
> > available?  How exactly would guests know if the MSR is really
> > safe to use?
> 
> As far as we know, the only guest that uses it is ESX.  Like most other
> MSRs, the guest should in general be ready for it to cause a #GP fault.

True, guests are normally ready to handle both cases: 1) the MSR
working as on bare metal; 2) MSR reads causing a #GP fault.

But with the current KVM code, old machine types can't provide
either of those cases, only a third one: the MSR read won't
cause a #GP, but the MSR can unexpectedly reset due to live migration.
Are guests ready to handle that?
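
One way to address that for older machine types would be to gate the new subsection on a CPU property that compat machine types switch off, so that pre-2.12 machines never send it. The sketch below assumes a hypothetical migrate_smi_count field in X86CPU and an "x-migrate-smi-count" property name; it illustrates the approach rather than the change that was eventually merged:

/* target/i386/cpu.h: add a `bool migrate_smi_count;` field to struct X86CPU
 * (assumed name, for illustration). */

/* target/i386/machine.c: send the subsection only when the property allows it
 * and the counter is non-zero. */
static bool msr_smi_count_needed(void *opaque)
{
    X86CPU *cpu = opaque;
    CPUX86State *env = &cpu->env;

    return cpu->migrate_smi_count && env->msr_smi_count != 0;
}

/* target/i386/cpu.c, in the CPU's property list: default on, turned off via
 * compat properties for 2.11 and older machine types. */
DEFINE_PROP_BOOL("x-migrate-smi-count", X86CPU, migrate_smi_count, true),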

Patch

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index b5e431e769da..ba9ec6a6116b 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -3645,6 +3645,7 @@ static void x86_cpu_reset(CPUState *s)
     cpu_x86_update_cr0(env, 0x60000010);
     env->a20_mask = ~0x0;
     env->smbase = 0x30000;
+    env->msr_smi_count = 0;
 
     env->idt.limit = 0xffff;
     env->gdt.limit = 0xffff;
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index faf39ec1ce77..254e557bb8fa 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1,3 +1,4 @@ 
+
 /*
  * i386 virtual CPU header
  *
@@ -359,6 +360,7 @@ typedef enum X86Seg {
 #define MSR_P6_PERFCTR0                 0xc1
 
 #define MSR_IA32_SMBASE                 0x9e
+#define MSR_SMI_COUNT                   0x34
 #define MSR_MTRRcap                     0xfe
 #define MSR_MTRRcap_VCNT                8
 #define MSR_MTRRcap_FIXRANGE_SUPPORT    (1 << 8)
@@ -1123,6 +1125,7 @@ typedef struct CPUX86State {
 
     uint64_t pat;
     uint32_t smbase;
+    uint64_t msr_smi_count;
 
     uint32_t pkru;
 
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index ad4b159b28af..a53735f266c5 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -92,6 +92,7 @@ static bool has_msr_hv_stimer;
 static bool has_msr_hv_frequencies;
 static bool has_msr_xss;
 static bool has_msr_spec_ctrl;
+static bool has_msr_smi_count;
 
 static uint32_t has_architectural_pmu_version;
 static uint32_t num_architectural_pmu_gp_counters;
@@ -1124,6 +1125,9 @@ static int kvm_get_supported_msrs(KVMState *s)
                 case MSR_IA32_SMBASE:
                     has_msr_smbase = true;
                     break;
+                case MSR_SMI_COUNT:
+                    has_msr_smi_count = true;
+                    break;
                 case MSR_IA32_MISC_ENABLE:
                     has_msr_misc_enable = true;
                     break;
@@ -1633,6 +1637,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
     if (has_msr_smbase) {
         kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase);
     }
+    if (has_msr_smi_count) {
+        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count);
+    }
     if (has_msr_bndcfgs) {
         kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs);
     }
@@ -1979,6 +1986,9 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_smbase) {
         kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0);
     }
+    if (has_msr_smi_count) {
+        kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0);
+    }
     if (has_msr_feature_control) {
         kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0);
     }
@@ -2205,6 +2215,9 @@ static int kvm_get_msrs(X86CPU *cpu)
         case MSR_IA32_SMBASE:
             env->smbase = msrs[i].data;
             break;
+        case MSR_SMI_COUNT:
+            env->msr_smi_count = msrs[i].data;
+            break;
         case MSR_IA32_FEATURE_CONTROL:
             env->msr_ia32_feature_control = msrs[i].data;
             break;
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 361c05aedfdc..9432496cbda8 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -395,6 +395,25 @@ static const VMStateDescription vmstate_msr_tsc_adjust = {
     }
 };
 
+static bool msr_smi_count_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+
+    return env->msr_smi_count != 0;
+}
+
+static const VMStateDescription vmstate_msr_smi_count = {
+    .name = "cpu/msr_smi_count",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = msr_smi_count_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_smi_count, X86CPU),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static bool tscdeadline_needed(void *opaque)
 {
     X86CPU *cpu = opaque;
@@ -952,6 +971,7 @@ VMStateDescription vmstate_x86_cpu = {
         &vmstate_avx512,
         &vmstate_xss,
         &vmstate_tsc_khz,
+        &vmstate_msr_smi_count,
 #ifdef TARGET_X86_64
         &vmstate_pkru,
 #endif