| Message ID | 20220921152436.3673454-11-vkuznets@redhat.com (mailing list archive) |
|---|---|
| State | New, archived |
| Series | KVM: x86: hyper-v: Fine-grained TLB flush + L2 TLB flush features |
On Wed, Sep 21, 2022, Vitaly Kuznetsov wrote:
> Get rid of on-stack allocation of vcpu_mask and optimize kvm_hv_send_ipi()
> for a smaller number of vCPUs in the request. When Hyper-V TLB flush
> is in use, HvSendSyntheticClusterIpi{,Ex} calls are not commonly used to
> send IPIs to a large number of vCPUs (and are rarely used in general).
>
> Introduce hv_is_vp_in_sparse_set() to directly check if the specified
> VP_ID is present in sparse vCPU set.
>
> Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
> Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
> ---
>  arch/x86/kvm/hyperv.c | 37 ++++++++++++++++++++++++++-----------
>  1 file changed, 26 insertions(+), 11 deletions(-)
>
> diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
> index 69891c48c12a..9764ebb7fd5f 100644
> --- a/arch/x86/kvm/hyperv.c
> +++ b/arch/x86/kvm/hyperv.c
> @@ -1741,6 +1741,25 @@ static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
>  	}
>  }
>
> +static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_banks[])
> +{
> +	int bank, sbank = 0;
> +
> +	if (!test_bit(vp_id / HV_VCPUS_PER_SPARSE_BANK,
> +		      (unsigned long *)&valid_bank_mask))
> +		return false;
> +
> +	for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
> +			 KVM_HV_MAX_SPARSE_VCPU_SET_BITS) {
> +		if (bank == vp_id / HV_VCPUS_PER_SPARSE_BANK)
> +			break;
> +		sbank++;

At the risk of being too clever, this can be heavily optimized, which given
what this helper is used for is probably worth doing.

The index into sparse_banks is the number of bits preceding the target bit,
and POPCNT can determine the number of bits.  So, to get the index, simply
strip off the upper bits and do hweight64().  And to avoid bugs while also
optimizing for "small" VMs, the math can be skipped if vp_id < 64, i.e. if
bank==0, because in that case there can't possibly be preceding bits.

Compile tested only...

	int valid_bit_nr = vp_id / HV_VCPUS_PER_SPARSE_BANK;
	unsigned long sbank;

	if (!test_bit(valid_bit_nr, (unsigned long *)&valid_bank_mask))
		return false;

	/*
	 * The index into the sparse bank is the number of preceding bits in
	 * the valid mask.  Optimize for VMs with <64 vCPUs by skipping the
	 * fancy math if there can't possibly be preceding bits.
	 */
	if (valid_bit_nr)
		sbank = hweight64(valid_bank_mask & GENMASK_ULL(valid_bit_nr - 1, 0));
	else
		sbank = 0;

	return test_bit(vp_id % HV_VCPUS_PER_SPARSE_BANK,
			(unsigned long *)&sparse_banks[sbank]);

yields this, where the "call __sw_hweight64" will be patched to POPCNT on
64-bit hosts (POPCNT has been around for a long time).

   call   0xffffffff810c3ea0 <__fentry__>
   push   %rbp
   mov    %edi,%eax
   mov    %rsp,%rbp
   shr    $0x6,%eax
   sub    $0x8,%rsp
   mov    %rsi,-0x8(%rbp)
   mov    %eax,%ecx
   bt     %rcx,-0x8(%rbp)
   setb   %cl
   jae    0xffffffff81064784 <hv_is_vp_in_sparse_set+52>
   test   %eax,%eax
   mov    %edi,%r8d
   jne    0xffffffff8106478c <hv_is_vp_in_sparse_set+60>
   and    $0x3f,%r8d
   bt     %r8,(%rdx)
   setb   %cl
   leave
   mov    %ecx,%eax
   jmp    0xffffffff81c02200 <__x86_return_thunk>
   mov    $0x40,%ecx
   mov    $0xffffffffffffffff,%rdi
   sub    %eax,%ecx
   shr    %cl,%rdi
   and    -0x8(%rbp),%rdi
   call   0xffffffff815beea0 <__sw_hweight64>
   lea    (%rdx,%rax,8),%rdx
   jmp    0xffffffff81064779 <hv_is_vp_in_sparse_set+41>

Alternatively, we could choose not to optimize bit==0 and just do:

	sbank = hweight64(valid_bank_mask & GENMASK_ULL(valid_bit_nr, 0)) - 1;

but there's enough prep work needed for hweight64() that I think it's worth
optimizing because "small" VMs are probably very common.
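[Editorial note: the following is a small stand-alone user-space sketch, not part of the patch or the thread, illustrating the index math discussed above. The position of a VP's bank within sparse_banks[] equals the number of set bits in valid_bank_mask below that bank, so a popcount of the masked-off low bits gives the same answer as the patch's loop. HV_VCPUS_PER_SPARSE_BANK and GENMASK_ULL() are re-declared locally for the example, and __builtin_popcountll() stands in for the kernel's hweight64().]

```c
#include <stdint.h>
#include <stdio.h>

#define HV_VCPUS_PER_SPARSE_BANK	64
#define GENMASK_ULL(h, l) \
	(((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

/* Loop-based bank index, equivalent to the patch as posted. */
static int sbank_by_loop(uint32_t vp_id, uint64_t valid_bank_mask)
{
	int bank, sbank = 0;

	for (bank = 0; bank < 64; bank++) {
		if (!(valid_bank_mask & (1ULL << bank)))
			continue;
		if (bank == (int)(vp_id / HV_VCPUS_PER_SPARSE_BANK))
			break;
		sbank++;
	}
	return sbank;
}

/* POPCNT-based bank index, as suggested in the review. */
static int sbank_by_popcnt(uint32_t vp_id, uint64_t valid_bank_mask)
{
	int valid_bit_nr = vp_id / HV_VCPUS_PER_SPARSE_BANK;

	/* No preceding banks are possible when the VP lives in bank 0. */
	if (!valid_bit_nr)
		return 0;

	return __builtin_popcountll(valid_bank_mask &
				    GENMASK_ULL(valid_bit_nr - 1, 0));
}

int main(void)
{
	/* Banks 0, 2 and 5 are valid, i.e. VPs 0-63, 128-191, 320-383. */
	uint64_t valid_bank_mask = (1ULL << 0) | (1ULL << 2) | (1ULL << 5);
	uint32_t vp_id = 321;	/* lives in bank 5, sparse index 2 */

	printf("loop=%d popcnt=%d\n",
	       sbank_by_loop(vp_id, valid_bank_mask),
	       sbank_by_popcnt(vp_id, valid_bank_mask));
	return 0;
}
```

For VP 321 with banks 0, 2 and 5 present, both variants should report index 2, i.e. the number of valid banks below bank 5.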
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 69891c48c12a..9764ebb7fd5f 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1741,6 +1741,25 @@ static void sparse_set_to_vcpu_mask(struct kvm *kvm, u64 *sparse_banks,
 	}
 }
 
+static bool hv_is_vp_in_sparse_set(u32 vp_id, u64 valid_bank_mask, u64 sparse_banks[])
+{
+	int bank, sbank = 0;
+
+	if (!test_bit(vp_id / HV_VCPUS_PER_SPARSE_BANK,
+		      (unsigned long *)&valid_bank_mask))
+		return false;
+
+	for_each_set_bit(bank, (unsigned long *)&valid_bank_mask,
+			 KVM_HV_MAX_SPARSE_VCPU_SET_BITS) {
+		if (bank == vp_id / HV_VCPUS_PER_SPARSE_BANK)
+			break;
+		sbank++;
+	}
+
+	return test_bit(vp_id % HV_VCPUS_PER_SPARSE_BANK,
+			(unsigned long *)&sparse_banks[sbank]);
+}
+
 struct kvm_hv_hcall {
 	u64 param;
 	u64 ingpa;
@@ -2023,8 +2042,8 @@ static u64 kvm_hv_flush_tlb(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 			((u64)hc->rep_cnt << HV_HYPERCALL_REP_COMP_OFFSET);
 }
 
-static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
-				 unsigned long *vcpu_bitmap)
+static void kvm_hv_send_ipi_to_many(struct kvm *kvm, u32 vector,
+				    u64 *sparse_banks, u64 valid_bank_mask)
 {
 	struct kvm_lapic_irq irq = {
 		.delivery_mode = APIC_DM_FIXED,
@@ -2034,7 +2053,10 @@ static void kvm_send_ipi_to_many(struct kvm *kvm, u32 vector,
 	unsigned long i;
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (vcpu_bitmap && !test_bit(i, vcpu_bitmap))
+		if (sparse_banks &&
+		    !hv_is_vp_in_sparse_set(kvm_hv_get_vpindex(vcpu),
+					    valid_bank_mask,
+					    sparse_banks))
 			continue;
 
 		/* We fail only when APIC is disabled */
@@ -2047,7 +2069,6 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 	struct kvm *kvm = vcpu->kvm;
 	struct hv_send_ipi_ex send_ipi_ex;
 	struct hv_send_ipi send_ipi;
-	DECLARE_BITMAP(vcpu_mask, KVM_MAX_VCPUS);
 	u64 valid_bank_mask;
 	u64 sparse_banks[KVM_HV_MAX_SPARSE_VCPU_SET_BITS];
 	u32 vector;
@@ -2109,13 +2130,7 @@ static u64 kvm_hv_send_ipi(struct kvm_vcpu *vcpu, struct kvm_hv_hcall *hc)
 	if ((vector < HV_IPI_LOW_VECTOR) || (vector > HV_IPI_HIGH_VECTOR))
 		return HV_STATUS_INVALID_HYPERCALL_INPUT;
 
-	if (all_cpus) {
-		kvm_send_ipi_to_many(kvm, vector, NULL);
-	} else {
-		sparse_set_to_vcpu_mask(kvm, sparse_banks, valid_bank_mask, vcpu_mask);
-
-		kvm_send_ipi_to_many(kvm, vector, vcpu_mask);
-	}
+	kvm_hv_send_ipi_to_many(kvm, vector, all_cpus ? NULL : sparse_banks, valid_bank_mask);
 
 ret_success:
 	return HV_STATUS_SUCCESS;
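[Editorial note: as context for the membership check above, the sparse VP set is laid out as a 64-bit valid_bank_mask in which each set bit names a bank of 64 VP indexes, with the selected banks' bitmaps packed consecutively into sparse_banks[] in ascending bank order. The following is a minimal user-space sketch of that layout plus a loop-based membership test mirroring the logic of hv_is_vp_in_sparse_set(); the names below are local to the example, not from the patch.]

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VCPUS_PER_BANK 64	/* mirrors HV_VCPUS_PER_SPARSE_BANK */

/* Loop-based membership test, analogous to the patch's helper. */
static bool vp_in_sparse_set(uint32_t vp_id, uint64_t valid_bank_mask,
			     const uint64_t sparse_banks[])
{
	uint32_t bank = vp_id / VCPUS_PER_BANK;
	int b, sbank = 0;

	if (!(valid_bank_mask & (1ULL << bank)))
		return false;

	/* This bank's bitmap is preceded by one entry per lower valid bank. */
	for (b = 0; b < (int)bank; b++)
		if (valid_bank_mask & (1ULL << b))
			sbank++;

	return sparse_banks[sbank] & (1ULL << (vp_id % VCPUS_PER_BANK));
}

int main(void)
{
	/* Encode the set {1, 130, 131}: only banks 0 and 2 are valid. */
	uint64_t valid_bank_mask = (1ULL << 0) | (1ULL << 2);
	uint64_t sparse_banks[] = {
		1ULL << (1 % VCPUS_PER_BANK),		/* bank 0: VP 1 */
		(1ULL << (130 % VCPUS_PER_BANK)) |
		(1ULL << (131 % VCPUS_PER_BANK)),	/* bank 2: VPs 130, 131 */
	};
	uint32_t vp;

	for (vp = 0; vp < 3 * VCPUS_PER_BANK; vp++)
		if (vp_in_sparse_set(vp, valid_bank_mask, sparse_banks))
			printf("VP %u is in the set\n", vp);
	return 0;
}
```

This is also why the patch can drop the KVM_MAX_VCPUS-sized vcpu_mask: checking each vCPU's VP index directly against the sparse set avoids materializing a full bitmap when only a handful of vCPUs are targeted.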