[v2,1/4] kvm: nested: Introduce read_and_check_msr_entry()

Message ID: 20191105191910.56505-2-aaronlewis@google.com (mailing list archive)
State: New, archived
Series: Add support for capturing the highest observable L2 TSC

Commit Message

Aaron Lewis Nov. 5, 2019, 7:19 p.m. UTC
Add the function read_and_check_msr_entry(), which for now just pulls
some code out of nested_vmx_store_msr(). However, this is in preparation
for a change later in this series, where the code in
read_and_check_msr_entry() is reused.

Reviewed-by: Jim Mattson <jmattson@google.com>
Signed-off-by: Aaron Lewis <aaronlewis@google.com>
---
 arch/x86/kvm/vmx/nested.c | 35 ++++++++++++++++++++++-------------
 1 file changed, 22 insertions(+), 13 deletions(-)

--
2.24.0.rc1.363.gb1bccd3e3d-goog

Comments

Liran Alon Nov. 5, 2019, 9:27 p.m. UTC | #1
> On 5 Nov 2019, at 21:19, Aaron Lewis <aaronlewis@google.com> wrote:
> 
> Add the function read_and_check_msr_entry(), which for now just pulls
> some code out of nested_vmx_store_msr(). However, this is in preparation
> for a change later in this series, where the code in
> read_and_check_msr_entry() is reused.

Please don’t refer to “this series” in the commit message.
Once this patch series is merged, “this series” will be meaningless.

Prefer to just explain what change will be introduced in future commits that requires this one.
E.g. “This new utility function will be used by upcoming patches that introduce code which searches vmcs12->vm_exit_msr_store for a specific entry.”
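
For context, a minimal sketch of the kind of search described above, reusing
the helper introduced by this patch. The function name
nested_vmx_find_stored_msr and its exact signature are illustrative
assumptions, not code from this series:

/*
 * Illustrative sketch only: scan the vmcs12 VM-exit MSR-store area for a
 * given MSR index, reusing read_and_check_msr_entry() to fetch and
 * validate each entry from guest memory. On success, *pos holds the
 * position of the matching entry.
 */
static bool nested_vmx_find_stored_msr(struct kvm_vcpu *vcpu, u32 msr_index,
				       u32 *pos)
{
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	u64 gpa = vmcs12->vm_exit_msr_store_addr;
	struct vmx_msr_entry e;
	u32 i;

	for (i = 0; i < vmcs12->vm_exit_msr_store_count; i++) {
		/* Only e.index and e.reserved are read and validated here. */
		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
			return false;
		if (e.index == msr_index) {
			*pos = i;
			return true;
		}
	}
	return false;
}

Since read_and_check_msr_entry() reads only the first two u32 fields of each
entry (index and reserved), a caller that also needs the stored value would
read it from guest memory separately.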

> 
> Reviewed-by: Jim Mattson <jmattson@google.com>
> Signed-off-by: Aaron Lewis <aaronlewis@google.com>

Reviewed-by: Liran Alon <liran.alon@oracle.com>

Patch

diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index e76eb4f07f6c..7b058d7b9fcc 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -929,6 +929,26 @@ static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 	return i + 1;
 }

+static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
+				     struct vmx_msr_entry *e)
+{
+	if (kvm_vcpu_read_guest(vcpu,
+				gpa + i * sizeof(*e),
+				e, 2 * sizeof(u32))) {
+		pr_debug_ratelimited(
+			"%s cannot read MSR entry (%u, 0x%08llx)\n",
+			__func__, i, gpa + i * sizeof(*e));
+		return false;
+	}
+	if (nested_vmx_store_msr_check(vcpu, e)) {
+		pr_debug_ratelimited(
+			"%s check failed (%u, 0x%x, 0x%x)\n",
+			__func__, i, e->index, e->reserved);
+		return false;
+	}
+	return true;
+}
+
 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 {
 	u64 data;
@@ -940,20 +960,9 @@ static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
 		if (unlikely(i >= max_msr_list_size))
 			return -EINVAL;

-		if (kvm_vcpu_read_guest(vcpu,
-					gpa + i * sizeof(e),
-					&e, 2 * sizeof(u32))) {
-			pr_debug_ratelimited(
-				"%s cannot read MSR entry (%u, 0x%08llx)\n",
-				__func__, i, gpa + i * sizeof(e));
+		if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
 			return -EINVAL;
-		}
-		if (nested_vmx_store_msr_check(vcpu, &e)) {
-			pr_debug_ratelimited(
-				"%s check failed (%u, 0x%x, 0x%x)\n",
-				__func__, i, e.index, e.reserved);
-			return -EINVAL;
-		}
+
 		if (kvm_get_msr(vcpu, e.index, &data)) {
 			pr_debug_ratelimited(
 				"%s cannot read MSR (%u, 0x%x)\n",