@@ -117,6 +117,7 @@ struct kvm_arch {
unsigned long pv_features;
s64 time_offset;
+ cpumask_t tlb_flush_pending;
unsigned long vmid[NR_CPUS];
struct kvm_context __percpu *vmcs;
};
@@ -317,6 +318,8 @@ static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot) {}
void kvm_check_vpid(struct kvm_vcpu *vcpu);
+#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer);
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, const struct kvm_memory_slot *memslot);
void kvm_init_vmcs(struct kvm *kvm);
@@ -292,6 +292,9 @@ static void __kvm_check_vmid(struct kvm_vcpu *vcpu)
cpu = smp_processor_id();
context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu);
+ if (cpumask_test_and_clear_cpu(cpu, &vcpu->kvm->arch.tlb_flush_pending))
+ vcpu->kvm->arch.vmid[cpu] = 0;
+
/*
* Check if our vmid is of an older version
@@ -947,6 +947,23 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}
+/*
+ * kvm_arch_flush_remote_tlbs() - flush all VM TLB entries
+ * @kvm: pointer to kvm structure.
+ */
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
+{
+	/*
+	 * Queue a TLB invalidation on each physical CPU, to be
+	 * performed the next time a vCPU of this VM is loaded there
+	 */
+ if (cpu_has_guestid)
+ cpumask_setall(&kvm->arch.tlb_flush_pending);
+
+	/* Return 1 to continue sending IPIs to running vCPUs */
+ return 1;
+}
+
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
const struct kvm_memory_slot *memslot)
{
@@ -208,7 +208,12 @@ static int kvm_check_requests(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
- vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */
+		/*
+		 * vpid must be dropped together with vmid when vpid
+		 * is not separated from vmid (no GuestID support)
+		 */
+ if (!cpu_has_guestid)
+ vcpu->arch.vpid = 0;
if (kvm_dirty_ring_check_request(vcpu))
return RESUME_HOST;
With remote TLB flushing, the vpid index stays unchanged and only the
vmid index is updated, since remote TLB flushing only needs to
invalidate the GPA --> HPA translations. As the flushing method, the
cpumask tlb_flush_pending is added and set for all possible CPUs. When
a vCPU is scheduled on a physical CPU, the vmid is updated and the bit
for that physical CPU is cleared from the cpumask.

Signed-off-by: Bibo Mao <maobibo@loongson.cn>
---
 arch/loongarch/include/asm/kvm_host.h |  3 +++
 arch/loongarch/kvm/main.c             |  3 +++
 arch/loongarch/kvm/mmu.c              | 17 +++++++++++++++++
 arch/loongarch/kvm/vcpu.c             |  7 ++++++-
 4 files changed, 29 insertions(+), 1 deletion(-)
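
For reference, below is a minimal userspace model (not part of the
patch) of the pending-flush scheme described above: a remote flush sets
one bit per physical CPU, and each CPU drops its vmid and clears its
own bit the next time a vCPU is loaded there. NR_CPUS, next_vmid and
the function names are illustrative stand-ins for the kernel's cpumask
and vmid machinery.

#include <stdio.h>

#define NR_CPUS 4

static unsigned long tlb_flush_pending;	/* one pending bit per physical CPU */
static unsigned long vmid[NR_CPUS];	/* current vmid in use on each CPU */
static unsigned long next_vmid = 1;	/* stand-in for real vmid allocation */

/* Models kvm_arch_flush_remote_tlbs(): mark every CPU as stale. */
static void flush_remote_tlbs(void)
{
	tlb_flush_pending = (1UL << NR_CPUS) - 1;
}

/* Models __kvm_check_vmid(): runs when a vCPU is loaded on @cpu. */
static void check_vmid(int cpu)
{
	if (tlb_flush_pending & (1UL << cpu)) {
		tlb_flush_pending &= ~(1UL << cpu);
		vmid[cpu] = 0;		/* force a fresh vmid below */
	}
	if (vmid[cpu] == 0)
		vmid[cpu] = next_vmid++;
}

int main(void)
{
	check_vmid(0);
	printf("cpu0 vmid=%lu\n", vmid[0]);	/* vmid=1 */

	flush_remote_tlbs();			/* all CPUs marked stale */
	check_vmid(0);				/* cpu0 observes the flush */
	printf("cpu0 vmid=%lu pending=%#lx\n",
	       vmid[0], tlb_flush_pending);	/* vmid=2 pending=0xe */
	return 0;
}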