diff mbox series

[2/2] kvm: x86: differentiate unrecognized MSRs from errors

Message ID 20180829234241.103002-3-pshier@google.com (mailing list archive)
State New, archived
Headers show
Series Propagate accurate MSR access failures to userspace | expand

Commit Message

Peter Shier Aug. 29, 2018, 11:42 p.m. UTC
Change all MSR access code that handles an MSR range to return -ENOENT if
the specified MSR is out of range. Does not affect KVM_GET/SET_MSRS return
values, in order to preserve the existing IOCTL API contract. Does not
affect existing guest behavior on an unrecognized MSR (#GP).

Signed-off-by: Peter Shier <pshier@google.com>
Reviewed-by: Jim Mattson <jmattson@google.com>
---
 arch/x86/kvm/hyperv.c    | 16 ++++++++--------
 arch/x86/kvm/lapic.c     |  2 +-
 arch/x86/kvm/mtrr.c      | 10 +++++++---
 arch/x86/kvm/pmu_amd.c   | 24 +++++++++++++-----------
 arch/x86/kvm/pmu_intel.c |  5 +++--
 arch/x86/kvm/vmx.c       |  4 ++--
 arch/x86/kvm/x86.c       |  8 ++++----
 7 files changed, 38 insertions(+), 31 deletions(-)
diff mbox series

Patch

diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 01d209ab5481b..69d93672c68a3 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -289,7 +289,7 @@  static int synic_set_msr(struct kvm_vcpu_hv_synic *synic,
 		ret = synic_set_sint(synic, msr - HV_X64_MSR_SINT0, data, host);
 		break;
 	default:
-		ret = 1;
+		ret = -ENOENT;
 		break;
 	}
 	return ret;
@@ -324,7 +324,7 @@  static int synic_get_msr(struct kvm_vcpu_hv_synic *synic, u32 msr, u64 *pdata,
 		*pdata = atomic64_read(&synic->sint[msr - HV_X64_MSR_SINT0]);
 		break;
 	default:
-		ret = 1;
+		ret = -ENOENT;
 		break;
 	}
 	return ret;
@@ -774,7 +774,7 @@  static int kvm_hv_msr_get_crash_data(struct kvm_vcpu *vcpu,
 	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
 
 	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
-		return -EINVAL;
+		return -ENOENT;
 
 	*pdata = hv->hv_crash_param[index];
 	return 0;
@@ -817,7 +817,7 @@  static int kvm_hv_msr_set_crash_data(struct kvm_vcpu *vcpu,
 	struct kvm_hv *hv = &vcpu->kvm->arch.hyperv;
 
 	if (WARN_ON_ONCE(index >= ARRAY_SIZE(hv->hv_crash_param)))
-		return -EINVAL;
+		return -ENOENT;
 
 	hv->hv_crash_param[index] = data;
 	return 0;
@@ -1023,7 +1023,7 @@  static int kvm_hv_set_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data,
 	default:
 		vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
 			    msr, data);
-		return 1;
+		return -ENOENT;
 	}
 	return 0;
 }
@@ -1116,7 +1116,7 @@  static int kvm_hv_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data, bool host)
 	default:
 		vcpu_unimpl(vcpu, "Hyper-V uhandled wrmsr: 0x%x data 0x%llx\n",
 			    msr, data);
-		return 1;
+		return -ENOENT;
 	}
 
 	return 0;
@@ -1161,7 +1161,7 @@  static int kvm_hv_get_msr_pw(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 		break;
 	default:
 		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
-		return 1;
+		return -ENOENT;
 	}
 
 	*pdata = data;
@@ -1223,7 +1223,7 @@  static int kvm_hv_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata,
 		break;
 	default:
 		vcpu_unimpl(vcpu, "Hyper-V unhandled rdmsr: 0x%x\n", msr);
-		return 1;
+		return -ENOENT;
 	}
 	*pdata = data;
 	return 0;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0cefba28c864a..a8fb1c618bf54 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1885,7 +1885,7 @@  int kvm_lapic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 			ret = 1;
 		break;
 	default:
-		ret = 1;
+		ret = -ENOENT;
 		break;
 	}
 	if (ret)
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index e9ea2d45ae66b..28fd39c30ba50 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -383,8 +383,12 @@  int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
 	int index;
 
-	if (!kvm_mtrr_valid(vcpu, msr, data))
-		return 1;
+	if (!kvm_mtrr_valid(vcpu, msr, data)) {
+		if (!msr_mtrr_valid(msr))
+			return -ENOENT;
+		else
+			return 1;
+	}
 
 	index = fixed_msr_to_range_index(msr);
 	if (index >= 0)
@@ -417,7 +421,7 @@  int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	}
 
 	if (!msr_mtrr_valid(msr))
-		return 1;
+		return -ENOENT;
 
 	index = fixed_msr_to_range_index(msr);
 	if (index >= 0)
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index 1495a735b38e7..ad9f8462a7da6 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -228,7 +228,7 @@  static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 		return 0;
 	}
 
-	return 1;
+	return -ENOENT;
 }
 
 static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -243,16 +243,18 @@  static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	if (pmc) {
 		pmc->counter += data - pmc_read_counter(pmc);
 		return 0;
-	}
-	/* MSR_EVNTSELn */
-	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
-	if (pmc) {
-		if (data == pmc->eventsel)
-			return 0;
-		if (!(data & pmu->reserved_bits)) {
-			reprogram_gp_counter(pmc, data);
-			return 0;
-		}
+	} else {
+		/* MSR_EVNTSELn */
+		pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
+		if (pmc) {
+			if (data == pmc->eventsel)
+				return 0;
+			if (!(data & pmu->reserved_bits)) {
+				reprogram_gp_counter(pmc, data);
+				return 0;
+			}
+		} else
+			return -ENOENT;
 	}
 
 	return 1;
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index 5ab4a364348e3..2e722451d325f 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -193,7 +193,7 @@  static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 		}
 	}
 
-	return 1;
+	return -ENOENT;
 }
 
 static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -248,7 +248,8 @@  static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 				reprogram_gp_counter(pmc, data);
 				return 0;
 			}
-		}
+		} else
+			return -ENOENT;
 	}
 
 	return 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1d26f3c4985ba..d3253c9a8f10f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3952,7 +3952,7 @@  static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		/*
 		 * The rest of the VMX capability MSRs do not support restore.
 		 */
-		return -EINVAL;
+		return -ENOENT;
 	}
 }
 
@@ -4028,7 +4028,7 @@  static int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pda
 		*pdata = msrs->vmfunc_controls;
 		break;
 	default:
-		return 1;
+		return -ENOENT;
 	}
 
 	return 0;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7d6ab722650e4..8d2ecf5bcf899 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2216,7 +2216,7 @@  static int set_msr_mce(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			vcpu->arch.mce_banks[offset] = data;
 			break;
 		}
-		return 1;
+		return -ENOENT;
 	}
 	return 0;
 }
@@ -2558,7 +2558,7 @@  int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!ignore_msrs) {
 			vcpu_debug_ratelimited(vcpu, "unhandled wrmsr: 0x%x data 0x%llx\n",
 				    msr, data);
-			return 1;
+			return -ENOENT;
 		} else {
 			if (report_ignored_msrs)
 				vcpu_unimpl(vcpu,
@@ -2612,7 +2612,7 @@  static int get_msr_mce(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 			data = vcpu->arch.mce_banks[offset];
 			break;
 		}
-		return 1;
+		return -ENOENT;
 	}
 	*pdata = data;
 	return 0;
@@ -2791,7 +2791,7 @@  int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!ignore_msrs) {
 			vcpu_debug_ratelimited(vcpu, "unhandled rdmsr: 0x%x\n",
 					       msr_info->index);
-			return 1;
+			return -ENOENT;
 		} else {
 			if (report_ignored_msrs)
 				vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n",