
[40/59] KVM: arm64: nv: Don't always start an S2 MMU search from the beginning

Message ID 20190621093843.220980-41-marc.zyngier@arm.com (mailing list archive)
State New, archived
Series KVM: arm64: ARMv8.3 Nested Virtualization support

Commit Message

Marc Zyngier June 21, 2019, 9:38 a.m. UTC
Starting an S2 MMU search from the beginning all the time means that
we're potentially nuking a useful context (like we'd potentially
have on a !VHE KVM guest).

Instead, let's always start the search from the point *after* the
last allocated context. This should ensure that alternating between
two EL1 contexts will not result in nuking the whole S2 each time.

lookup_s2_mmu now has a chance to provide a hit.

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/include/asm/kvm_host.h |  1 +
 arch/arm64/kvm/nested.c           | 14 ++++++++++++--
 2 files changed, 13 insertions(+), 2 deletions(-)
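
The search policy described above, resuming from just past the last context that was handed out instead of always starting at index 0, can be illustrated with a small, self-contained sketch. None of the code below is KVM code: the pool, slot, refcnt and next names are invented stand-ins for nested_mmus, struct kvm_s2_mmu, its refcnt and nested_mmus_next.

#include <stdio.h>

#define POOL_SIZE 4

/* Toy stand-in for the nested_mmus array: refcnt > 0 means "in use". */
struct slot {
	int refcnt;
};

static struct slot pool[POOL_SIZE];
static int next;	/* plays the role of nested_mmus_next */

/*
 * Scan at most POOL_SIZE entries, starting at 'next' and wrapping with a
 * modulo, so the slot handed out last time is the last one we would
 * consider reusing.  (The real code BUG()s if nothing is free; this demo
 * never gets there.)
 */
static struct slot *get_slot(void)
{
	struct slot *s = NULL;
	int i;

	for (i = next; i < next + POOL_SIZE; i++) {
		s = &pool[i % POOL_SIZE];
		if (s->refcnt == 0)
			break;
	}

	next = (i + 1) % POOL_SIZE;	/* resume here on the next call */
	s->refcnt++;
	return s;
}

int main(void)
{
	/*
	 * Two users allocate and release a slot per round.  The scan
	 * rotates through the pool instead of always reusing slots 0
	 * and 1, so recently used (but currently free) entries are the
	 * last to be recycled.
	 */
	for (int round = 0; round < 3; round++) {
		struct slot *a = get_slot();
		struct slot *b = get_slot();

		printf("round %d: a=%d b=%d\n", round,
		       (int)(a - pool), (int)(b - pool));
		a->refcnt--;
		b->refcnt--;
	}
	return 0;
}

With the old policy (always scanning from index 0) every round would hand out slots 0 and 1 again, recycling whatever stage-2 state they still cached; with the rotating start the demo prints 0/1, 2/3, 0/1, which is the behaviour the commit message aims for when two EL1 contexts alternate.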

Comments

Alexandru Elisei July 9, 2019, 9:59 a.m. UTC | #1
On 6/21/19 10:38 AM, Marc Zyngier wrote:
> Starting an S2 MMU search from the beginning all the time means that
> we're potentially nuking a useful context (like we'd potentially
> have on a !VHE KVM guest).
>
> Instead, let's always start the search from the point *after* the
> last allocated context. This should ensure that alternating between
> two EL1 contexts will not result in nuking the whole S2 each time.
>
> lookup_s2_mmu now has a chance to provide a hit.
>
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>  arch/arm64/include/asm/kvm_host.h |  1 +
>  arch/arm64/kvm/nested.c           | 14 ++++++++++++--
>  2 files changed, 13 insertions(+), 2 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index b71a7a237f95..b7c44adcdbf3 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -123,6 +123,7 @@ struct kvm_arch {
>  	 */
>  	struct kvm_s2_mmu *nested_mmus;
>  	size_t nested_mmus_size;
> +	int nested_mmus_next;

For consistency, shouldn't nested_mmus_next be zero initialized in
kvm_init_nested (arch/arm64/kvm/nested.c), like nested_mmus and
nested_mmus_size? Not a big deal either way, since struct kvm is allocated using
vzalloc.

> 
>  	/* VTCR_EL2 value for this VM */
>  	u64    vtcr;
> diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
> index 09afafbdc8fe..214d59019935 100644
> --- a/arch/arm64/kvm/nested.c
> +++ b/arch/arm64/kvm/nested.c
> @@ -363,14 +363,24 @@ static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu)
>  	if (s2_mmu)
>  		goto out;
>  
> -	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
> -		s2_mmu = &kvm->arch.nested_mmus[i];
> +	/*
> +	 * Make sure we don't always search from the same point, or we
> +	 * will always reuse a potentially active context, leaving
> +	 * free contexts unused.
> +	 */
> +	for (i = kvm->arch.nested_mmus_next;
> +	     i < (kvm->arch.nested_mmus_size + kvm->arch.nested_mmus_next);
> +	     i++) {
> +		s2_mmu = &kvm->arch.nested_mmus[i % kvm->arch.nested_mmus_size];
>  
>  		if (atomic_read(&s2_mmu->refcnt) == 0)
>  			break;
>  	}
>  	BUG_ON(atomic_read(&s2_mmu->refcnt)); /* We have struct MMUs to spare */
>  
> +	/* Set the scene for the next search */
> +	kvm->arch.nested_mmus_next = (i + 1) % kvm->arch.nested_mmus_size;
> +
>  	if (kvm_s2_mmu_valid(s2_mmu)) {
>  		/* Clear the old state */
>  		kvm_unmap_stage2_range(s2_mmu, 0, kvm_phys_size(kvm));
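
A rough sketch of the zero-initialization Alexandru suggests above. kvm_init_nested() is not quoted anywhere in this thread, so the two existing assignments shown here are assumptions based on the comment (nested_mmus and nested_mmus_size being set up in that function); only the nested_mmus_next line is the suggested addition:

void kvm_init_nested(struct kvm *kvm)
{
	/* Assumed existing initialization, per the review comment */
	kvm->arch.nested_mmus = NULL;
	kvm->arch.nested_mmus_size = 0;

	/* Suggested addition, for consistency with the fields above */
	kvm->arch.nested_mmus_next = 0;
}

As noted in the review, this is cosmetic: struct kvm is allocated with vzalloc(), so the field is already zero.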

Patch

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index b71a7a237f95..b7c44adcdbf3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -123,6 +123,7 @@  struct kvm_arch {
 	 */
 	struct kvm_s2_mmu *nested_mmus;
 	size_t nested_mmus_size;
+	int nested_mmus_next;
 
 	/* VTCR_EL2 value for this VM */
 	u64    vtcr;
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 09afafbdc8fe..214d59019935 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -363,14 +363,24 @@  static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu)
 	if (s2_mmu)
 		goto out;
 
-	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
-		s2_mmu = &kvm->arch.nested_mmus[i];
+	/*
+	 * Make sure we don't always search from the same point, or we
+	 * will always reuse a potentially active context, leaving
+	 * free contexts unused.
+	 */
+	for (i = kvm->arch.nested_mmus_next;
+	     i < (kvm->arch.nested_mmus_size + kvm->arch.nested_mmus_next);
+	     i++) {
+		s2_mmu = &kvm->arch.nested_mmus[i % kvm->arch.nested_mmus_size];
 
 		if (atomic_read(&s2_mmu->refcnt) == 0)
 			break;
 	}
 	BUG_ON(atomic_read(&s2_mmu->refcnt)); /* We have struct MMUs to spare */
 
+	/* Set the scene for the next search */
+	kvm->arch.nested_mmus_next = (i + 1) % kvm->arch.nested_mmus_size;
+
 	if (kvm_s2_mmu_valid(s2_mmu)) {
 		/* Clear the old state */
 		kvm_unmap_stage2_range(s2_mmu, 0, kvm_phys_size(kvm));
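
As a worked example of the wrap-around arithmetic in the new loop (the numbers are made up): with nested_mmus_size == 4 and nested_mmus_next == 2, the scan visits nested_mmus[2 % 4], [3 % 4], [4 % 4] and [5 % 4], i.e. indices 2, 3, 0, 1, covering every entry exactly once. If the first free context is found at i == 4 (index 0), nested_mmus_next becomes (4 + 1) % 4 == 1, so the next allocation starts its search just past the slot that was handed out here.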