
[04/11] KVM: MMU: do not mark accessed bit on pte write path

Message ID 4E2EA4C8.5010103@cn.fujitsu.com (mailing list archive)
State New, archived

Commit Message

Xiao Guangrong July 26, 2011, 11:28 a.m. UTC
In the current code, the accessed bit is always set when a page fault occurs,
so there is no need to set it on the pte write path.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
---
 arch/x86/include/asm/kvm_host.h |    1 -
 arch/x86/kvm/mmu.c              |   22 +---------------------
 2 files changed, 1 insertions(+), 22 deletions(-)
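
For reference, a minimal standalone sketch of the fault-path rule the patch
relies on: set_spte() marks the accessed bit whenever the spte is built for a
real access (speculative == false), so only prefetched sptes are left
unmarked. The constants below are illustrative stand-ins for PT_PRESENT_MASK
and shadow_accessed_mask, not the kernel definitions:

#include <stdbool.h>
#include <stdint.h>

#define PT_PRESENT_MASK       (1ULL << 0)   /* x86 PTE present bit */
#define SHADOW_ACCESSED_MASK  (1ULL << 5)   /* illustrative accessed bit */

static uint64_t build_spte(bool speculative)
{
	uint64_t spte = PT_PRESENT_MASK;

	/* A non-speculative fault means the guest really used the pte. */
	if (!speculative)
		spte |= SHADOW_ACCESSED_MASK;

	return spte;
}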

Comments

Avi Kivity July 27, 2011, 9:08 a.m. UTC | #1
On 07/26/2011 02:28 PM, Xiao Guangrong wrote:
> In the current code, the accessed bit is always set when a page fault occurs,
> so there is no need to set it on the pte write path.
>

Is this true? a write with pte.w=pte.a=0 sets pte.a?
Xiao Guangrong July 27, 2011, 10:04 a.m. UTC | #2
On 07/27/2011 05:08 PM, Avi Kivity wrote:
> On 07/26/2011 02:28 PM, Xiao Guangrong wrote:
>> In the current code, the accessed bit is always set when a page fault occurs,
>> so there is no need to set it on the pte write path.
>>
> 
> Is this true? a write with pte.w=pte.a=0 sets pte.a?
> 

Generally we call set_spte() with speculative = false, so the accessed bit
is set on the page fault path, but there are two cases:
- if pte.d = 1, everything is OK
- if pte.d = 0, this write can set pte.d = 1, and the access permission of
  the shadow page can change from read-only to writable. We then find a
  new sp to establish the mapping, and in this case the accessed bit is not
  set on the original spte.

However, we use the accessed bit to detect write flooding, so the first write
sets the dirty bit and later writes do not cause the sp to be changed. I
think it is not too bad.
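
For illustration, a self-contained sketch of the write-flood heuristic being
discussed follows. The field names (last_pt_write_gfn, last_pt_write_count,
last_pte_updated) mirror kvm_vcpu_arch in the patch below; the struct, the
accessed-bit position, and the FLOOD_THRESHOLD value are simplified stand-ins,
not the kernel code:

#include <stdbool.h>
#include <stdint.h>

#define SHADOW_ACCESSED_MASK  (1ULL << 5)   /* illustrative bit position */
#define FLOOD_THRESHOLD       3             /* assumed threshold, sketch only */

struct flood_state {
	uint64_t  last_pt_write_gfn;
	int       last_pt_write_count;
	uint64_t *last_pte_updated;
};

static bool last_updated_pte_accessed(struct flood_state *v)
{
	uint64_t *spte = v->last_pte_updated;

	return spte && (*spte & SHADOW_ACCESSED_MASK);
}

/* Returns true when the shadowed page table looks write-flooded. */
static bool note_guest_pt_write(struct flood_state *v, uint64_t gfn)
{
	if (gfn == v->last_pt_write_gfn && !last_updated_pte_accessed(v)) {
		/* The guest keeps rewriting a pte it never used. */
		return ++v->last_pt_write_count >= FLOOD_THRESHOLD;
	}
	v->last_pt_write_gfn = gfn;
	v->last_pt_write_count = 1;
	return false;
}

With kvm_mmu_access_page() removed, an spte left unaccessed by the pte.d = 0
case above can make the first guest pte write count toward this heuristic,
which is the trade-off argued to be acceptable here.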

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 95f55a9..ad88bfb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -355,7 +355,6 @@  struct kvm_vcpu_arch {
 	gfn_t last_pt_write_gfn;
 	int   last_pt_write_count;
 	u64  *last_pte_updated;
-	gfn_t last_pte_gfn;
 
 	struct fpu guest_fpu;
 	u64 xcr0;
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5193de8..992a8a5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2196,11 +2196,6 @@  static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	if (set_mmio_spte(sptep, gfn, pfn, pte_access))
 		return 0;
 
-	/*
-	 * We don't set the accessed bit, since we sometimes want to see
-	 * whether the guest actually used the pte (in order to detect
-	 * demand paging).
-	 */
 	spte = PT_PRESENT_MASK;
 	if (!speculative)
 		spte |= shadow_accessed_mask;
@@ -2351,10 +2346,8 @@  static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 		}
 	}
 	kvm_release_pfn_clean(pfn);
-	if (speculative) {
+	if (speculative)
 		vcpu->arch.last_pte_updated = sptep;
-		vcpu->arch.last_pte_gfn = gfn;
-	}
 }
 
 static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
@@ -3519,18 +3512,6 @@  static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 	return !!(spte && (*spte & shadow_accessed_mask));
 }
 
-static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
-{
-	u64 *spte = vcpu->arch.last_pte_updated;
-
-	if (spte
-	    && vcpu->arch.last_pte_gfn == gfn
-	    && shadow_accessed_mask
-	    && !(*spte & shadow_accessed_mask)
-	    && is_shadow_present_pte(*spte))
-		set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
-}
-
 static int get_free_pte_list_desc_nr(struct kvm_vcpu *vcpu)
 {
 	struct kvm_mmu_memory_cache *cache;
@@ -3604,7 +3585,6 @@  void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	++vcpu->kvm->stat.mmu_pte_write;
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 	if (guest_initiated) {
-		kvm_mmu_access_page(vcpu, gfn);
 		if (gfn == vcpu->arch.last_pt_write_gfn
 		    && !last_updated_pte_accessed(vcpu)) {
 			++vcpu->arch.last_pt_write_count;