Minor cleanup: rename the local batch counter 'n' in
kvmppc_h_bulk_remove() to 'collected_hpte' so its purpose is clearer.
No functional change.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/kvm/book3s_hv_rm_mmu.c | 25 +++++++++++++------------
 1 file changed, 13 insertions(+), 12 deletions(-)

@@ -523,7 +523,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
unsigned long *args = &vcpu->arch.gpr[4];
__be64 *hp, *hptes[4];
unsigned long tlbrb[4];
- long int i, j, k, n, found, indexes[4];
+ long int i, j, k, collected_hpte, found, indexes[4];
unsigned long flags, req, pte_index, rcbits;
int global;
long int ret = H_SUCCESS;
@@ -532,7 +532,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
global = global_invalidates(kvm, 0);
for (i = 0; i < 4 && ret == H_SUCCESS; ) {
- n = 0;
+ collected_hpte = 0;
for (; i < 4; ++i) {
j = i * 2;
pte_index = args[j];
@@ -554,7 +554,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
hp = (__be64 *) (kvm->arch.hpt_virt + (pte_index << 4));
/* to avoid deadlock, don't spin except for first */
if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
- if (n)
+ if (collected_hpte)
break;
while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
cpu_relax();
@@ -596,22 +596,23 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
/* leave it locked */
hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
- tlbrb[n] = compute_tlbie_rb(be64_to_cpu(hp[0]),
- be64_to_cpu(hp[1]), pte_index);
- indexes[n] = j;
- hptes[n] = hp;
- revs[n] = rev;
- ++n;
+ tlbrb[collected_hpte] = compute_tlbie_rb(be64_to_cpu(hp[0]),
+ be64_to_cpu(hp[1]),
+ pte_index);
+ indexes[collected_hpte] = j;
+ hptes[collected_hpte] = hp;
+ revs[collected_hpte] = rev;
+ ++collected_hpte;
}
- if (!n)
+ if (!collected_hpte)
break;
/* Now that we've collected a batch, do the tlbies */
- do_tlbies(kvm, tlbrb, n, global, true);
+ do_tlbies(kvm, tlbrb, collected_hpte, global, true);
/* Read PTE low words after tlbie to get final R/C values */
- for (k = 0; k < n; ++k) {
+ for (k = 0; k < collected_hpte; ++k) {
j = indexes[k];
pte_index = args[j] & ((1ul << 56) - 1);
hp = hptes[k];
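Note for readers less familiar with this path: the loop being touched collects
up to four HPTEs per pass, refusing to spin on any lock except the first one so
that an already collected batch can be flushed rather than risking a deadlock,
and then issues the TLB invalidations for the whole batch at once. Below is a
minimal, self-contained sketch of that collect-then-flush pattern only; every
name in it (struct entry, try_lock_entry, flush_batch, bulk_remove) is
illustrative and is not part of the kernel code above, and it generalizes the
fixed four-slot request list to an arbitrary number of entries.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	atomic_bool lock;
	int value;
};

static bool try_lock_entry(struct entry *e)
{
	/* returns true if we took the lock */
	return !atomic_exchange(&e->lock, true);
}

static void unlock_entry(struct entry *e)
{
	atomic_store(&e->lock, false);
}

static void flush_batch(struct entry **batch, int n)
{
	/* stands in for do_tlbies(): one invalidation pass for the whole batch */
	for (int k = 0; k < n; ++k)
		printf("invalidating entry %d\n", batch[k]->value);
}

static void bulk_remove(struct entry *entries, int total)
{
	struct entry *batch[4];

	for (int i = 0; i < total; ) {
		int collected = 0;

		for (; i < total && collected < 4; ++i) {
			struct entry *e = &entries[i];

			/* to avoid deadlock, don't spin except for the first */
			if (!try_lock_entry(e)) {
				if (collected)
					break;	/* flush what we already hold */
				while (!try_lock_entry(e))
					;	/* cpu_relax() in the real code */
			}
			batch[collected++] = e;
		}
		if (!collected)
			break;

		/* batch is full or a lock was contended: invalidate in one go */
		flush_batch(batch, collected);

		for (int k = 0; k < collected; ++k)
			unlock_entry(batch[k]);
	}
}

int main(void)
{
	struct entry entries[8] = { 0 };

	for (int i = 0; i < 8; ++i)
		entries[i].value = i;
	bulk_remove(entries, 8);
	return 0;
}

The same structure is why the patch threads collected_hpte through tlbrb[],
indexes[], hptes[] and revs[]: those arrays carry the per-batch state from the
collect phase to the post-tlbie fixup phase.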