@@ -559,6 +559,10 @@ struct kvm {
unsigned long mn_active_invalidate_count;
struct rcuwait mn_memslots_update_rcuwait;
+ /* For invalidation of gfn_to_pfn_caches */
+ struct list_head gpc_list;
+ spinlock_t gpc_lock;
+
/*
* created_vcpus is protected by kvm->lock, and is incremented
* at the beginning of KVM_CREATE_VCPU. online_vcpus is only
@@ -966,6 +970,16 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
+int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ struct kvm_vcpu *vcpu, bool guest_uses_pa,
+ bool kernel_map, gpa_t gpa, unsigned long len,
+ bool write);
+int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ gpa_t gpa, unsigned long len, bool write);
+bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ gpa_t gpa, unsigned long len);
+void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc);
+
void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
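A minimal usage sketch of the API declared above (not part of the patch). The consumer structure, its GPA and the error handling are hypothetical; only the init/check/refresh/destroy calls and the gfn_to_pfn_cache fields added by this series are assumed. A kernel-map consumer takes gpc->lock for read around each access and refreshes whenever the check fails:

#include <linux/kvm_host.h>

/* Hypothetical consumer that keeps a kernel mapping of one guest page. */
struct my_region {
	struct gfn_to_pfn_cache gpc;
};

static int my_region_setup(struct kvm *kvm, struct kvm_vcpu *vcpu,
			   struct my_region *r, gpa_t gpa)
{
	/* Kernel mapping only; the guest does not use the physical address. */
	return kvm_gfn_to_pfn_cache_init(kvm, &r->gpc, vcpu, false, true,
					 gpa, PAGE_SIZE, true);
}

static int my_region_write(struct kvm *kvm, struct my_region *r,
			   const void *data, unsigned long len)
{
	struct gfn_to_pfn_cache *gpc = &r->gpc;
	unsigned long flags;

	read_lock_irqsave(&gpc->lock, flags);
	while (!kvm_gfn_to_pfn_cache_check(kvm, gpc, gpc->gpa, len)) {
		read_unlock_irqrestore(&gpc->lock, flags);

		/* Re-resolve the mapping outside the lock, then retry. */
		if (kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpc->gpa, len, true))
			return -EFAULT;

		read_lock_irqsave(&gpc->lock, flags);
	}

	memcpy(gpc->khva, data, len);
	read_unlock_irqrestore(&gpc->lock, flags);
	return 0;
}

static void my_region_teardown(struct kvm *kvm, struct my_region *r)
{
	kvm_gfn_to_pfn_cache_destroy(kvm, &r->gpc);
}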
@@ -19,6 +19,7 @@ struct kvm_memslots;
enum kvm_mr_change;
#include <linux/types.h>
+#include <linux/spinlock_types.h>
#include <asm/kvm_types.h>
@@ -53,6 +54,23 @@ struct gfn_to_hva_cache {
struct kvm_memory_slot *memslot;
};
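+/*
+ * A cached translation of one page of guest memory: the guest physical
+ * address, its userspace HVA and memslot, the host PFN and, optionally, a
+ * kernel mapping of the page. Caches are linked into kvm->gpc_list (under
+ * kvm->gpc_lock) so the MMU notifier can invalidate them; 'lock' serialises
+ * users of the mapping against invalidation and refresh.
+ */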
+struct gfn_to_pfn_cache {
+ u64 generation;
+ gpa_t gpa;
+ unsigned long uhva;
+ struct kvm_memory_slot *memslot;
+ struct kvm_vcpu *vcpu;
+ struct list_head list;
+ rwlock_t lock;
+ void *khva;
+ kvm_pfn_t pfn;
+ bool active;
+ bool valid;
+ bool dirty;
+ bool guest_uses_pa;
+ bool kernel_map;
+};
+
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
* Memory caches are used to preallocate memory ahead of various MMU flows,
@@ -458,6 +458,9 @@ static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
return container_of(mn, struct kvm, mmu_notifier);
}
+static void gfn_to_pfn_cache_invalidate(struct kvm *kvm, unsigned long start,
+ unsigned long end, bool may_block);
+
static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end)
@@ -465,6 +468,8 @@ static void kvm_mmu_notifier_invalidate_range(struct mmu_notifier *mn,
struct kvm *kvm = mmu_notifier_to_kvm(mn);
int idx;
+ gfn_to_pfn_cache_invalidate(kvm, start, end, false);
+
idx = srcu_read_lock(&kvm->srcu);
kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
srcu_read_unlock(&kvm->srcu, idx);
@@ -1051,6 +1056,9 @@ static struct kvm *kvm_create_vm(unsigned long type)
spin_lock_init(&kvm->mn_invalidate_lock);
rcuwait_init(&kvm->mn_memslots_update_rcuwait);
+ INIT_LIST_HEAD(&kvm->gpc_list);
+ spin_lock_init(&kvm->gpc_lock);
+
INIT_LIST_HEAD(&kvm->devices);
BUILD_BUG_ON(KVM_MEM_SLOTS_NUM > SHRT_MAX);
@@ -2618,6 +2626,248 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
}
EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
+static void gfn_to_pfn_cache_invalidate(struct kvm *kvm, unsigned long start,
+ unsigned long end, bool may_block)
+{
+ bool wake_vcpus = false, wake_all_vcpus = false;
+ DECLARE_BITMAP(vcpu_bitmap, KVM_MAX_VCPUS);
+ struct gfn_to_pfn_cache *gpc;
+ bool called = false;
+
+ spin_lock(&kvm->gpc_lock);
+ list_for_each_entry(gpc, &kvm->gpc_list, list) {
+ write_lock_irq(&gpc->lock);
+
+ /* Only a single page so no need to care about length */
+ if (gpc->valid && !is_error_noslot_pfn(gpc->pfn) &&
+ gpc->uhva >= start && gpc->uhva < end) {
+ gpc->valid = false;
+
+ if (gpc->dirty) {
+ int idx = srcu_read_lock(&kvm->srcu);
+ mark_page_dirty(kvm, gpa_to_gfn(gpc->gpa));
+ srcu_read_unlock(&kvm->srcu, idx);
+
+ kvm_set_pfn_dirty(gpc->pfn);
+ gpc->dirty = false;
+ }
+
+ /*
+ * If a guest vCPU could be using the physical address,
+ * it needs to be woken.
+ */
+ if (gpc->guest_uses_pa) {
+ if (wake_all_vcpus) {
+ /* Nothing to do */
+ } else if (gpc->vcpu) {
+ /* Only need to wake one vCPU for this */
+ if (!wake_vcpus) {
+ wake_vcpus = true;
+ bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
+ }
+ __set_bit(gpc->vcpu->vcpu_idx, vcpu_bitmap);
+ } else {
+ wake_all_vcpus = true;
+ }
+ }
+ }
+ write_unlock_irq(&gpc->lock);
+ }
+ spin_unlock(&kvm->gpc_lock);
+
+ if (wake_all_vcpus || wake_vcpus) {
+ unsigned int req = KVM_REQ_GPC_INVALIDATE;
+
+ /*
+ * If the OOM reaper is active, then all vCPUs should have been stopped
+ * already, so perform the request without KVM_REQUEST_WAIT and be sad
+ * if anything needed to be woken.
+ */
+ if (!may_block)
+ req &= ~KVM_REQUEST_WAIT;
+
+ if (wake_all_vcpus)
+ called = kvm_make_all_cpus_request(kvm, req);
+ else
+ called = kvm_make_vcpus_request_mask(kvm, req, vcpu_bitmap);
+ }
+ WARN_ON_ONCE(called && !may_block);
+}
+
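For a cache created with guest_uses_pa, the invalidation above only marks the cache invalid and asks for the owning vCPU(s) to be woken via KVM_REQ_GPC_INVALIDATE; the vCPU is then expected to revalidate the cache before re-entering the guest. A rough sketch of that re-entry check follows; the handler name and call site are hypothetical, and only the check/refresh API above is assumed:

static int revalidate_guest_pa_cache(struct kvm_vcpu *vcpu,
				     struct gfn_to_pfn_cache *gpc,
				     unsigned long len)
{
	/* Still valid for the expected GPA and generation: nothing to do. */
	if (kvm_gfn_to_pfn_cache_check(vcpu->kvm, gpc, gpc->gpa, len))
		return 0;

	/*
	 * Re-resolve the HVA and PFN so the guest never runs with a stale
	 * physical address; a failure here must block guest entry.
	 */
	return kvm_gfn_to_pfn_cache_refresh(vcpu->kvm, gpc, gpc->gpa, len, true);
}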
+bool kvm_gfn_to_pfn_cache_check(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ gpa_t gpa, unsigned long len)
+{
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+
+ if ((gpa & ~PAGE_MASK) + len > PAGE_SIZE)
+ return false;
+
+ if (gpc->gpa != gpa || gpc->generation != slots->generation ||
+ kvm_is_error_hva(gpc->uhva))
+ return false;
+
+ if (!gpc->valid)
+ return false;
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(kvm_gfn_to_pfn_cache_check);
+
+int kvm_gfn_to_pfn_cache_refresh(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ gpa_t gpa, unsigned long len, bool write)
+{
+ struct kvm_memslots *slots = kvm_memslots(kvm);
+ unsigned long page_offset = gpa & ~PAGE_MASK;
+ kvm_pfn_t old_pfn, new_pfn;
+ unsigned long old_uhva;
+ gpa_t old_gpa;
+ void *old_khva;
+ bool old_valid, old_dirty;
+ int ret = 0;
+
+ /*
+ * It must fit within a single page. The 'len' argument is
+ * only to enforce that.
+ */
+ if (page_offset + len > PAGE_SIZE)
+ return -EINVAL;
+
+ write_lock_irq(&gpc->lock);
+
+ old_gpa = gpc->gpa;
+ old_pfn = gpc->pfn;
+ old_khva = gpc->khva;
+ old_uhva = gpc->uhva;
+ old_valid = gpc->valid;
+ old_dirty = gpc->dirty;
+
+ /* If the userspace HVA is invalid, refresh that first */
+ if (gpc->gpa != gpa || gpc->generation != slots->generation ||
+ kvm_is_error_hva(gpc->uhva)) {
+ gfn_t gfn = gpa_to_gfn(gpa);
+
+ gpc->dirty = false;
+ gpc->gpa = gpa;
+ gpc->generation = slots->generation;
+ gpc->memslot = __gfn_to_memslot(slots, gfn);
+ gpc->uhva = gfn_to_hva_memslot(gpc->memslot, gfn);
+
+ if (kvm_is_error_hva(gpc->uhva)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ gpc->uhva += page_offset;
+ }
+
+ /*
+ * If the userspace HVA changed or the PFN was already invalid,
+ * drop the lock and do the HVA to PFN lookup again.
+ */
+ if (!old_valid || old_uhva != gpc->uhva) {
+ unsigned long uhva = gpc->uhva;
+ void *new_khva = NULL;
+
+ /* Placeholders for "hva is valid but not yet mapped" */
+ gpc->pfn = KVM_PFN_ERR_FAULT;
+ gpc->khva = NULL;
+ gpc->valid = true;
+
+ write_unlock_irq(&gpc->lock);
+
+ new_pfn = hva_to_pfn(uhva, false, NULL, true, NULL);
+ if (is_error_noslot_pfn(new_pfn))
+ ret = -EFAULT;
+ else if (gpc->kernel_map) {
+ if (new_pfn == old_pfn) {
+ new_khva = (void *)((unsigned long)old_khva - page_offset);
+ old_pfn = KVM_PFN_ERR_FAULT;
+ old_khva = NULL;
+ } else if (pfn_valid(new_pfn)) {
+ new_khva = kmap(pfn_to_page(new_pfn));
+#ifdef CONFIG_HAS_IOMEM
+ } else {
+ new_khva = memremap(pfn_to_hpa(new_pfn), PAGE_SIZE, MEMREMAP_WB);
+#endif
+ }
+ if (!new_khva)
+ ret = -EFAULT;
+ }
+
+ write_lock_irq(&gpc->lock);
+ if (ret) {
+ gpc->valid = false;
+ gpc->pfn = KVM_PFN_ERR_FAULT;
+ gpc->khva = NULL;
+ } else {
+ /* At this point, gpc->valid may already have been cleared */
+ gpc->pfn = new_pfn;
+ gpc->khva = new_khva + page_offset;
+ }
+ }
+
+ out:
+ if (ret)
+ gpc->dirty = false;
+ else
+ gpc->dirty = write;
+
+ write_unlock_irq(&gpc->lock);
+
+ /* Unmap the old page if it was mapped before */
+ if (!is_error_noslot_pfn(old_pfn)) {
+ if (pfn_valid(old_pfn)) {
+ kunmap(pfn_to_page(old_pfn));
+#ifdef CONFIG_HAS_IOMEM
+ } else {
+ memunmap(old_khva);
+#endif
+ }
+ kvm_release_pfn(old_pfn, old_dirty);
+ if (old_dirty)
+ mark_page_dirty(kvm, gpa_to_gfn(old_gpa));
+ }
+
+ return ret;
+}
+
+int kvm_gfn_to_pfn_cache_init(struct kvm *kvm, struct gfn_to_pfn_cache *gpc,
+ struct kvm_vcpu *vcpu, bool guest_uses_pa,
+ bool kernel_map, gpa_t gpa, unsigned long len,
+ bool write)
+{
+ if (!gpc->active) {
+ rwlock_init(&gpc->lock);
+
+ gpc->khva = NULL;
+ gpc->pfn = KVM_PFN_ERR_FAULT;
+ gpc->uhva = KVM_HVA_ERR_BAD;
+ gpc->vcpu = vcpu;
+ gpc->guest_uses_pa = guest_uses_pa;
+ gpc->kernel_map = kernel_map;
+ gpc->valid = false;
+ gpc->active = true;
+
+ spin_lock(&kvm->gpc_lock);
+ list_add(&gpc->list, &kvm->gpc_list);
+ spin_unlock(&kvm->gpc_lock);
+ }
+ return kvm_gfn_to_pfn_cache_refresh(kvm, gpc, gpa, len, write);
+}
+
+void kvm_gfn_to_pfn_cache_destroy(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
+{
+ if (gpc->active) {
+ spin_lock(&kvm->gpc_lock);
+ list_del(&gpc->list);
+ spin_unlock(&kvm->gpc_lock);
+
+ /* Refreshing with GPA_INVALID will fail, but in failing it tears down any existing mapping */
+ (void)kvm_gfn_to_pfn_cache_refresh(kvm, gpc, GPA_INVALID, 0, false);
+ gpc->active = false;
+ }
+}
+
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
kvm_pfn_t pfn;