diff mbox

[08/10] KVM: x86: switch kvm_set_memory_alias to SRCU update

Message ID 20090921234124.709624825@amt.cnet (mailing list archive)
State New, archived
Headers show

Commit Message

Marcelo Tosatti Sept. 21, 2009, 11:37 p.m. UTC
Using a two-step procedure similar to the one used for memslots.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>



--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Comments

Avi Kivity Sept. 22, 2009, 7:04 a.m. UTC | #1
On 09/22/2009 02:37 AM, Marcelo Tosatti wrote:
> Using a two-step procedure similar to the one used for memslots.
>
>
>
> -	gfn = unalias_gfn(kvm, gfn);
> +	gfn = unalias_gfn(kvm, gfn, false);
>    

To improve readability, I suggest two names, maybe unalias_gfn() and 
unalias_gfn_instantiation(). Boolean parameters are hard to read at the 
call site.

Also we've discussed in the past converting aliases to private slots 
(and also echo aliases >> feature-removal-schedule.txt); maybe that is 
an easier way of verifying everything still works.
diff mbox

Patch

Index: kvm-slotslock/arch/x86/include/asm/kvm_host.h
===================================================================
--- kvm-slotslock.orig/arch/x86/include/asm/kvm_host.h
+++ kvm-slotslock/arch/x86/include/asm/kvm_host.h
@@ -373,10 +373,13 @@  struct kvm_vcpu_arch {
 	u64 *mce_banks;
 };
 
+#define KVM_ALIAS_INVALID     1UL
+
 struct kvm_mem_alias {
 	gfn_t base_gfn;
 	unsigned long npages;
 	gfn_t target_gfn;
+	unsigned long flags;
 };
 
 struct kvm_mem_aliases {
Index: kvm-slotslock/arch/x86/kvm/mmu.c
===================================================================
--- kvm-slotslock.orig/arch/x86/kvm/mmu.c
+++ kvm-slotslock/arch/x86/kvm/mmu.c
@@ -424,7 +424,7 @@  static void account_shadowed(struct kvm 
 	int *write_count;
 	int i;
 
-	gfn = unalias_gfn(kvm, gfn);
+	gfn = unalias_gfn(kvm, gfn, false);
 
 	slot = gfn_to_memslot_unaliased(kvm, gfn);
 	for (i = PT_DIRECTORY_LEVEL;
@@ -440,7 +440,7 @@  static void unaccount_shadowed(struct kv
 	int *write_count;
 	int i;
 
-	gfn = unalias_gfn(kvm, gfn);
+	gfn = unalias_gfn(kvm, gfn, false);
 	for (i = PT_DIRECTORY_LEVEL;
 	     i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
 		slot          = gfn_to_memslot_unaliased(kvm, gfn);
@@ -457,7 +457,7 @@  static int has_wrprotected_page(struct k
 	struct kvm_memory_slot *slot;
 	int *largepage_idx;
 
-	gfn = unalias_gfn(kvm, gfn);
+	gfn = unalias_gfn(kvm, gfn, false);
 	slot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (slot) {
 		largepage_idx = slot_largepage_idx(gfn, slot, level);
@@ -565,7 +565,7 @@  static int rmap_add(struct kvm_vcpu *vcp
 
 	if (!is_rmap_spte(*spte))
 		return count;
-	gfn = unalias_gfn(vcpu->kvm, gfn);
+	gfn = unalias_gfn(vcpu->kvm, gfn, false);
 	sp = page_header(__pa(spte));
 	sp->gfns[spte - sp->spt] = gfn;
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
@@ -703,7 +703,7 @@  static int rmap_write_protect(struct kvm
 	u64 *spte;
 	int i, write_protected = 0;
 
-	gfn = unalias_gfn(kvm, gfn);
+	gfn = unalias_gfn(kvm, gfn, false);
 	rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
 
 	spte = rmap_next(kvm, rmapp, NULL);
@@ -836,7 +836,7 @@  static void rmap_recycle(struct kvm_vcpu
 
 	sp = page_header(__pa(spte));
 
-	gfn = unalias_gfn(vcpu->kvm, gfn);
+	gfn = unalias_gfn(vcpu->kvm, gfn, false);
 	rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
 
 	kvm_unmap_rmapp(vcpu->kvm, rmapp);
@@ -3358,7 +3358,7 @@  static void audit_write_protection(struc
 		if (sp->unsync)
 			continue;
 
-		gfn = unalias_gfn(vcpu->kvm, sp->gfn);
+		gfn = unalias_gfn(vcpu->kvm, sp->gfn, false);
 		slot = gfn_to_memslot_unaliased(vcpu->kvm, sp->gfn);
 		rmapp = &slot->rmap[gfn - slot->base_gfn];
 
Index: kvm-slotslock/arch/x86/kvm/x86.c
===================================================================
--- kvm-slotslock.orig/arch/x86/kvm/x86.c
+++ kvm-slotslock/arch/x86/kvm/x86.c
@@ -37,6 +37,7 @@ 
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <linux/cpufreq.h>
+#include <linux/srcu.h>
 #include <trace/events/kvm.h>
 #undef TRACE_INCLUDE_FILE
 #define CREATE_TRACE_POINTS
@@ -1957,14 +1958,18 @@  static int kvm_vm_ioctl_get_nr_mmu_pages
 	return kvm->arch.n_alloc_mmu_pages;
 }
 
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn, bool instantiation)
 {
 	int i;
 	struct kvm_mem_alias *alias;
-	struct kvm_mem_aliases *aliases = kvm->arch.aliases;
+	struct kvm_mem_aliases *aliases;
+
+	aliases = rcu_dereference(kvm->arch.aliases);
 
 	for (i = 0; i < aliases->naliases; ++i) {
 		alias = &aliases->aliases[i];
+		if (instantiation && (alias->flags & KVM_ALIAS_INVALID))
+			continue;
 		if (gfn >= alias->base_gfn
 		    && gfn < alias->base_gfn + alias->npages)
 			return alias->target_gfn + gfn - alias->base_gfn;
@@ -1982,7 +1987,7 @@  static int kvm_vm_ioctl_set_memory_alias
 {
 	int r, n;
 	struct kvm_mem_alias *p;
-	struct kvm_mem_aliases *aliases;
+	struct kvm_mem_aliases *aliases, *old_aliases;
 
 	r = -EINVAL;
 	/* General sanity checks */
@@ -1999,23 +2004,43 @@  static int kvm_vm_ioctl_set_memory_alias
 	    < alias->target_phys_addr)
 		goto out;
 
+	r = -ENOMEM;
+	aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+	if (!aliases)
+		goto out;
+
 	down_write(&kvm->slots_lock);
-	spin_lock(&kvm->mmu_lock);
 
-	aliases = kvm->arch.aliases;
+	/* invalidate any gfn reference in case of deletion/shrinking */
+	memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
+	aliases->aliases[alias->slot].flags |= KVM_ALIAS_INVALID;
+	old_aliases = kvm->arch.aliases;
+	rcu_assign_pointer(kvm->arch.aliases, aliases);
+	synchronize_srcu(&kvm->srcu);
+	kvm_mmu_zap_all(kvm);
+	kfree(old_aliases);
+
+	r = -ENOMEM;
+	aliases = kzalloc(sizeof(struct kvm_mem_aliases), GFP_KERNEL);
+	if (!aliases)
+		goto out;
+	memcpy(aliases, kvm->arch.aliases, sizeof(struct kvm_mem_aliases));
 
 	p = &aliases->aliases[alias->slot];
 	p->base_gfn = alias->guest_phys_addr >> PAGE_SHIFT;
 	p->npages = alias->memory_size >> PAGE_SHIFT;
 	p->target_gfn = alias->target_phys_addr >> PAGE_SHIFT;
+	p->flags &= ~(KVM_ALIAS_INVALID);
 
 	for (n = KVM_ALIAS_SLOTS; n > 0; --n)
 		if (aliases->aliases[n - 1].npages)
 			break;
 	aliases->naliases = n;
 
-	spin_unlock(&kvm->mmu_lock);
-	kvm_mmu_zap_all(kvm);
+	old_aliases = kvm->arch.aliases;
+	rcu_assign_pointer(kvm->arch.aliases, aliases);
+	synchronize_srcu(&kvm->srcu);
+	kfree(old_aliases);
 
 	up_write(&kvm->slots_lock);
 
Index: kvm-slotslock/include/linux/kvm_host.h
===================================================================
--- kvm-slotslock.orig/include/linux/kvm_host.h
+++ kvm-slotslock/include/linux/kvm_host.h
@@ -265,7 +265,7 @@  void kvm_arch_commit_memory_region(struc
 				int user_alloc);
 void kvm_disable_largepages(void);
 void kvm_arch_flush_shadow(struct kvm *kvm);
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn);
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn, bool instantiation);
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
 void kvm_release_page_clean(struct page *page);
Index: kvm-slotslock/virt/kvm/kvm_main.c
===================================================================
--- kvm-slotslock.orig/virt/kvm/kvm_main.c
+++ kvm-slotslock/virt/kvm/kvm_main.c
@@ -819,7 +819,7 @@  EXPORT_SYMBOL_GPL(gfn_to_memslot_unalias
 
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
 {
-	gfn = unalias_gfn(kvm, gfn);
+	gfn = unalias_gfn(kvm, gfn, false);
 	return gfn_to_memslot_unaliased(kvm, gfn);
 }
 
@@ -828,7 +828,7 @@  int kvm_is_visible_gfn(struct kvm *kvm, 
 	int i;
 	struct kvm_memslots *slots = rcu_dereference(kvm->memslots);
 
-	gfn = unalias_gfn(kvm, gfn);
+	gfn = unalias_gfn(kvm, gfn, true);
 	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
 		struct kvm_memory_slot *memslot = &slots->memslots[i];
 
@@ -847,7 +847,7 @@  unsigned long gfn_to_hva(struct kvm *kvm
 {
 	struct kvm_memory_slot *slot;
 
-	gfn = unalias_gfn(kvm, gfn);
+	gfn = unalias_gfn(kvm, gfn, true);
 	slot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (!slot || slot->flags & KVM_MEMSLOT_INVALID)
 		return bad_hva();
@@ -1117,7 +1117,7 @@  void mark_page_dirty(struct kvm *kvm, gf
 {
 	struct kvm_memory_slot *memslot;
 
-	gfn = unalias_gfn(kvm, gfn);
+	gfn = unalias_gfn(kvm, gfn, false);
 	memslot = gfn_to_memslot_unaliased(kvm, gfn);
 	if (memslot && memslot->dirty_bitmap) {
 		unsigned long rel_gfn = gfn - memslot->base_gfn;
Index: kvm-slotslock/arch/ia64/kvm/kvm-ia64.c
===================================================================
--- kvm-slotslock.orig/arch/ia64/kvm/kvm-ia64.c
+++ kvm-slotslock/arch/ia64/kvm/kvm-ia64.c
@@ -1946,7 +1946,7 @@  int kvm_cpu_has_pending_timer(struct kvm
 	return vcpu->arch.timer_fired;
 }
 
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn, bool instantiation)
 {
 	return gfn;
 }
Index: kvm-slotslock/arch/powerpc/kvm/powerpc.c
===================================================================
--- kvm-slotslock.orig/arch/powerpc/kvm/powerpc.c
+++ kvm-slotslock/arch/powerpc/kvm/powerpc.c
@@ -34,7 +34,7 @@ 
 #define CREATE_TRACE_POINTS
 #include "trace.h"
 
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn, bool instantiation)
 {
 	return gfn;
 }
Index: kvm-slotslock/arch/s390/kvm/kvm-s390.c
===================================================================
--- kvm-slotslock.orig/arch/s390/kvm/kvm-s390.c
+++ kvm-slotslock/arch/s390/kvm/kvm-s390.c
@@ -730,7 +730,7 @@  void kvm_arch_flush_shadow(struct kvm *k
 {
 }
 
-gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
+gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn, bool instantiation)
 {
 	return gfn;
 }