
[09/10] KVM: SVM: Drop redundant check in AVIC code on ID during vCPU creation

Message ID 20230815213533.548732-10-seanjc@google.com (mailing list archive)
State New, archived
Series KVM: SVM: Honor KVM_MAX_VCPUS when AVIC is enabled

Commit Message

Sean Christopherson Aug. 15, 2023, 9:35 p.m. UTC
Drop avic_get_physical_id_entry()'s compatibility check on the incoming
ID, as its sole caller, avic_init_backing_page(), performs the exact same
check.  Drop avic_get_physical_id_entry() entirely as the only remaining
functionality is getting the address of the Physical ID table, and
accessing the array without an immediate bounds check is kludgy.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
---
 arch/x86/kvm/svm/avic.c | 28 ++++++----------------------
 1 file changed, 6 insertions(+), 22 deletions(-)
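
For context, the check in avic_init_backing_page() that makes the dropped one
redundant looks roughly like the snippet below.  This is a reconstruction from
the upstream code around the time of this series; the inhibit reason and exact
shape are assumptions, not quoted from the patch.

	/*
	 * Inhibit AVIC if the vCPU ID is bigger than what is supported by
	 * AVIC hardware (sketch only; see the actual tree for the real code).
	 */
	if ((!x2avic_enabled && id > AVIC_MAX_PHYSICAL_ID) ||
	    (id > X2AVIC_MAX_PHYSICAL_ID)) {
		kvm_set_apicv_inhibit(vcpu->kvm,
				      APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG);
		return 0;
	}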

Comments

Maxim Levitsky Oct. 5, 2023, 12:58 p.m. UTC | #1
On Tue, 2023-08-15 at 14:35 -0700, Sean Christopherson wrote:
> Drop avic_get_physical_id_entry()'s compatibility check on the incoming
> ID, as its sole caller, avic_init_backing_page(), performs the exact same
> check.  Drop avic_get_physical_id_entry() entirely as the only remaining
> functionality is getting the address of the Physical ID table, and
> accessing the array without an immediate bounds check is kludgy.
> 
> No functional change intended.
> 
> Signed-off-by: Sean Christopherson <seanjc@google.com>
> ---
>  arch/x86/kvm/svm/avic.c | 28 ++++++----------------------
>  1 file changed, 6 insertions(+), 22 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
> index 3b2d00d9ca9b..6803e2d7bc22 100644
> --- a/arch/x86/kvm/svm/avic.c
> +++ b/arch/x86/kvm/svm/avic.c
> @@ -263,26 +263,12 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
>  		avic_deactivate_vmcb(svm);
>  }
>  
> -static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
> -				       unsigned int index)
> -{
> -	u64 *avic_physical_id_table;
> -	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
> -
> -	if ((!x2avic_enabled && index > AVIC_MAX_PHYSICAL_ID) ||
> -	    (index > X2AVIC_MAX_PHYSICAL_ID))
> -		return NULL;

While removing this code doesn't introduce a bug, it does make the code less
safe, because the new code blindly trusts that vcpu_id will never be out of
bounds of the Physical ID table.

Bugs happen, and that can and will happen someday.

> -
> -	avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
> -
> -	return &avic_physical_id_table[index];
> -}
> -
>  static int avic_init_backing_page(struct kvm_vcpu *vcpu)
>  {
> -	u64 *entry, new_entry;
> +	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
> +	struct vcpu_svm *svm = to_svm(vcpu);
> +	u64 *table, new_entry;
>  	int id = vcpu->vcpu_id;
> -	struct vcpu_svm *svm = to_svm(vcpu);
>  
>  	/*
>  	 * Inhibit AVIC if the vCPU ID is bigger than what is supported by AVIC
> @@ -318,15 +304,13 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu)
>  	}
>  
>  	/* Setting AVIC backing page address in the phy APIC ID table */
> -	entry = avic_get_physical_id_entry(vcpu, id);
> -	if (!entry)
> -		return -EINVAL;
> +	table = page_address(kvm_svm->avic_physical_id_table_page);
>  
>  	new_entry = avic_get_backing_page_address(svm) |
>  		    AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
> -	WRITE_ONCE(*entry, new_entry);

Here I would prefer to at least have an assert that id is within the bounds of
the page (i.e. less than 512), so that a bug doesn't turn into a security
issue by overflowing the buffer.

> +	WRITE_ONCE(table[id], new_entry);
>  
> -	svm->avic_physical_id_cache = entry;
> +	svm->avic_physical_id_cache = &table[id];
>  
>  	return 0;
>  }

Best regards,
	Maxim Levitsky
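
A minimal sketch of the bounds assert Maxim is asking for, applied on top of
this patch.  The choice of KVM_BUG_ON() and the exact placement are
assumptions for illustration, not something posted in this thread.

	/* Setting AVIC backing page address in the phy APIC ID table */
	table = page_address(kvm_svm->avic_physical_id_table_page);

	/*
	 * The Physical ID table is a single page of 512 8-byte entries, so
	 * assert that the vCPU ID cannot index past it before writing.
	 */
	if (KVM_BUG_ON(id > X2AVIC_MAX_PHYSICAL_ID, vcpu->kvm))
		return -EINVAL;

	new_entry = avic_get_backing_page_address(svm) |
		    AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
	WRITE_ONCE(table[id], new_entry);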

Patch

diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index 3b2d00d9ca9b..6803e2d7bc22 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -263,26 +263,12 @@  void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
 		avic_deactivate_vmcb(svm);
 }
 
-static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
-				       unsigned int index)
-{
-	u64 *avic_physical_id_table;
-	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
-
-	if ((!x2avic_enabled && index > AVIC_MAX_PHYSICAL_ID) ||
-	    (index > X2AVIC_MAX_PHYSICAL_ID))
-		return NULL;
-
-	avic_physical_id_table = page_address(kvm_svm->avic_physical_id_table_page);
-
-	return &avic_physical_id_table[index];
-}
-
 static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 {
-	u64 *entry, new_entry;
+	struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm);
+	struct vcpu_svm *svm = to_svm(vcpu);
+	u64 *table, new_entry;
 	int id = vcpu->vcpu_id;
-	struct vcpu_svm *svm = to_svm(vcpu);
 
 	/*
 	 * Inhibit AVIC if the vCPU ID is bigger than what is supported by AVIC
@@ -318,15 +304,13 @@  static int avic_init_backing_page(struct kvm_vcpu *vcpu)
 	}
 
 	/* Setting AVIC backing page address in the phy APIC ID table */
-	entry = avic_get_physical_id_entry(vcpu, id);
-	if (!entry)
-		return -EINVAL;
+	table = page_address(kvm_svm->avic_physical_id_table_page);
 
 	new_entry = avic_get_backing_page_address(svm) |
 		    AVIC_PHYSICAL_ID_ENTRY_VALID_MASK;
-	WRITE_ONCE(*entry, new_entry);
+	WRITE_ONCE(table[id], new_entry);
 
-	svm->avic_physical_id_cache = entry;
+	svm->avic_physical_id_cache = &table[id];
 
 	return 0;
 }