From patchwork Mon Sep 21 23:37:14 2009
X-Patchwork-Submitter: Marcelo Tosatti
X-Patchwork-Id: 49133
Message-Id: <20090921234124.205345161@amt.cnet>
User-Agent: quilt/0.47-1
Date: Mon, 21 Sep 2009 20:37:14 -0300
From: Marcelo Tosatti
To: kvm@vger.kernel.org
Cc: avi@redhat.com, Marcelo Tosatti
Subject: [patch 03/10] KVM: switch dirty_log to mmu_lock protection
References: <20090921233711.213665413@amt.cnet>
Content-Disposition: inline; filename=get-dirty-log

get_dirty_log and mark_page_dirty need to be mutually exclusive:
otherwise a dirty bit set between the bitmap copy and the bitmap
clear is silently lost. Switch both paths to mmu_lock protection.

Signed-off-by: Marcelo Tosatti

---
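The race being closed: mark_page_dirty() sets bits in
memslot->dirty_bitmap while the dirty-log ioctl copies and then clears
that same bitmap. Below is a minimal user-space sketch of the
mutual-exclusion pattern, assuming simplified stand-ins for
kvm->mmu_lock and memslot->dirty_bitmap (illustration only, not kernel
code):

/*
 * Illustration only: pthread_spinlock_t stands in for kvm->mmu_lock,
 * dirty_bitmap[] for memslot->dirty_bitmap.
 *
 * Without the lock, a writer can set a bit between the reader's
 * memcpy() and memset(); the memset() then wipes the new bit and that
 * page is never reported as dirty.
 */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BITMAP_LONGS 4
#define BITS_PER_LONG (8 * sizeof(unsigned long))

static pthread_spinlock_t mmu_lock;
static unsigned long dirty_bitmap[BITMAP_LONGS];

static void mark_page_dirty(unsigned long gfn)    /* writer side */
{
        pthread_spin_lock(&mmu_lock);
        dirty_bitmap[gfn / BITS_PER_LONG] |= 1UL << (gfn % BITS_PER_LONG);
        pthread_spin_unlock(&mmu_lock);
}

static void get_dirty_log(unsigned long *snap)    /* reader side */
{
        pthread_spin_lock(&mmu_lock);
        memcpy(snap, dirty_bitmap, sizeof(dirty_bitmap));  /* copy ... */
        memset(dirty_bitmap, 0, sizeof(dirty_bitmap));     /* ... and clear */
        pthread_spin_unlock(&mmu_lock);
}

int main(void)
{
        unsigned long snap[BITMAP_LONGS];

        pthread_spin_init(&mmu_lock, PTHREAD_PROCESS_PRIVATE);
        mark_page_dirty(3);
        mark_page_dirty(70);
        get_dirty_log(snap);
        printf("harvested: %#lx %#lx %#lx %#lx\n",
               snap[0], snap[1], snap[2], snap[3]);
        return 0;
}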
Index: kvm-slotslock/arch/x86/kvm/paging_tmpl.h
===================================================================
--- kvm-slotslock.orig/arch/x86/kvm/paging_tmpl.h
+++ kvm-slotslock/arch/x86/kvm/paging_tmpl.h
@@ -175,7 +175,9 @@ walk:
 		if (!(pte & PT_ACCESSED_MASK)) {
 			trace_kvm_mmu_set_accessed_bit(table_gfn, index,
 						       sizeof(pte));
+			spin_lock(&vcpu->kvm->mmu_lock);
 			mark_page_dirty(vcpu->kvm, table_gfn);
+			spin_unlock(&vcpu->kvm->mmu_lock);
 			if (FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn,
 			    index, pte, pte|PT_ACCESSED_MASK))
 				goto walk;
@@ -215,7 +217,9 @@ walk:
 		bool ret;

 		trace_kvm_mmu_set_dirty_bit(table_gfn, index, sizeof(pte));
+		spin_lock(&vcpu->kvm->mmu_lock);
 		mark_page_dirty(vcpu->kvm, table_gfn);
+		spin_unlock(&vcpu->kvm->mmu_lock);
 		ret = FNAME(cmpxchg_gpte)(vcpu->kvm, table_gfn, index, pte,
 			    pte|PT_DIRTY_MASK);
 		if (ret)
Index: kvm-slotslock/arch/x86/kvm/x86.c
===================================================================
--- kvm-slotslock.orig/arch/x86/kvm/x86.c
+++ kvm-slotslock/arch/x86/kvm/x86.c
@@ -692,7 +692,9 @@ static void kvm_write_guest_time(struct
 	kunmap_atomic(shared_kaddr, KM_USER0);

+	spin_lock(&v->kvm->mmu_lock);
 	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+	spin_unlock(&v->kvm->mmu_lock);
 }

 static int kvm_request_guest_time_update(struct kvm_vcpu *v)
@@ -2147,27 +2149,45 @@ static int kvm_vm_ioctl_reinject(struct
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 				      struct kvm_dirty_log *log)
 {
-	int r;
-	int n;
+	int r, n, i;
 	struct kvm_memory_slot *memslot;
-	int is_dirty = 0;
+	unsigned long is_dirty = 0;
+	unsigned long *dirty_bitmap;

 	down_write(&kvm->slots_lock);

-	r = kvm_get_dirty_log(kvm, log, &is_dirty);
-	if (r)
+	r = -EINVAL;
+	if (log->slot >= KVM_MEMORY_SLOTS)
+		goto out;
+
+	memslot = &kvm->memslots->memslots[log->slot];
+	r = -ENOENT;
+	if (!memslot->dirty_bitmap)
+		goto out;
+
+	n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+	r = -ENOMEM;
+	dirty_bitmap = vmalloc(n);
+	if (!dirty_bitmap)
 		goto out;
+	memset(dirty_bitmap, 0, n);
+
+	spin_lock(&kvm->mmu_lock);
+	for (i = 0; !is_dirty && i < n/sizeof(long); ++i)
+		is_dirty = memslot->dirty_bitmap[i];

 	/* If nothing is dirty, don't bother messing with page tables. */
 	if (is_dirty) {
-		spin_lock(&kvm->mmu_lock);
+		memcpy(dirty_bitmap, memslot->dirty_bitmap, n);
+		memset(memslot->dirty_bitmap, 0, n);
 		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->mmu_lock);
-		memslot = &kvm->memslots->memslots[log->slot];
-		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
-		memset(memslot->dirty_bitmap, 0, n);
 	}
+	spin_unlock(&kvm->mmu_lock);
+
+	r = 0;
+	if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
+		r = -EFAULT;
 out:
 	up_write(&kvm->slots_lock);
 	return r;
@@ -3491,7 +3511,9 @@ static void vapic_exit(struct kvm_vcpu *
 	down_read(&vcpu->kvm->slots_lock);
 	kvm_release_page_dirty(apic->vapic_page);
+	spin_lock(&vcpu->kvm->mmu_lock);
 	mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+	spin_unlock(&vcpu->kvm->mmu_lock);
 	up_read(&vcpu->kvm->slots_lock);
 }
Index: kvm-slotslock/virt/kvm/kvm_main.c
===================================================================
--- kvm-slotslock.orig/virt/kvm/kvm_main.c
+++ kvm-slotslock/virt/kvm/kvm_main.c
@@ -1007,7 +1007,9 @@ int kvm_write_guest_page(struct kvm *kvm
 	r = copy_to_user((void __user *)addr + offset, data, len);
 	if (r)
 		return -EFAULT;
+	spin_lock(&kvm->mmu_lock);
 	mark_page_dirty(kvm, gfn);
+	spin_unlock(&kvm->mmu_lock);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(kvm_write_guest_page);
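Design note on the reworked kvm_vm_ioctl_get_dirty_log() above:
copy_to_user() can fault and sleep, so it must not run under mmu_lock,
which is a spinlock. Hence the two-phase shape: snapshot into a
vmalloc'ed buffer and clear the live bitmap (and write-protect the
slot) while holding the lock, then copy the snapshot out to userspace
after unlocking. A user-space model of that shape, assuming
publish_to_user() as a hypothetical stand-in for copy_to_user(),
calloc() in place of vmalloc()+memset(), and the write-protect step
reduced to a comment:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct memslot {
        unsigned long *dirty_bitmap;
        size_t nbytes;
};

static pthread_spinlock_t mmu_lock;     /* models kvm->mmu_lock */

/* hypothetical stand-in for copy_to_user(); may block */
static int publish_to_user(const unsigned long *buf, size_t nbytes)
{
        size_t i;

        for (i = 0; i < nbytes / sizeof(unsigned long); i++)
                printf("%#lx ", buf[i]);
        printf("\n");
        return 0;
}

static int get_dirty_log(struct memslot *slot)
{
        unsigned long *snapshot;
        int dirty = 0, r = 0;
        size_t i;

        snapshot = calloc(1, slot->nbytes);     /* models vmalloc + memset */
        if (!snapshot)
                return -1;

        pthread_spin_lock(&mmu_lock);
        for (i = 0; !dirty && i < slot->nbytes / sizeof(unsigned long); i++)
                dirty = slot->dirty_bitmap[i] != 0;
        if (dirty) {
                /* copy and clear atomically w.r.t. mark_page_dirty() */
                memcpy(snapshot, slot->dirty_bitmap, slot->nbytes);
                memset(slot->dirty_bitmap, 0, slot->nbytes);
                /* the kernel also write-protects the slot here */
        }
        pthread_spin_unlock(&mmu_lock);

        /* the sleepable copy happens outside the lock */
        if (publish_to_user(snapshot, slot->nbytes))
                r = -1;
        free(snapshot);
        return r;
}

int main(void)
{
        unsigned long bits[2] = { 0x9, 0x0 };
        struct memslot slot = { bits, sizeof(bits) };

        pthread_spin_init(&mmu_lock, PTHREAD_PROCESS_PRIVATE);
        return get_dirty_log(&slot);
}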