
[6/7] kvm mmu: enabling 1GB pages by extending backing_size function

Message ID 1238164319-16092-7-git-send-email-joerg.roedel@amd.com (mailing list archive)
State New, archived

Commit Message

Joerg Roedel March 27, 2009, 2:31 p.m. UTC
This patch enables support for 1GB pages in KVM by implementing
the support in backing_size().

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
---
 arch/x86/kvm/mmu.c |   30 ++++++++++++++++++++++--------
 1 files changed, 22 insertions(+), 8 deletions(-)
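
For context, backing_size() and host_page_size() compare sizes using the kvm_page_size enum introduced earlier in this series. A minimal sketch of what the diff below assumes -- the names are taken from the patch, but the byte values are an assumption, not something stated in this mail:

enum kvm_page_size {
	KVM_PAGE_SIZE_4k = (1 << 12),	/* 4 KiB base page (assumed value) */
	KVM_PAGE_SIZE_2M = (1 << 21),	/* 2 MiB large page (assumed value) */
	KVM_PAGE_SIZE_1G = (1 << 30),	/* 1 GiB large page (assumed value) */
};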

Comments

Avi Kivity March 29, 2009, 11:51 a.m. UTC | #1
Joerg Roedel wrote:
> This patch enables support for 1GB pages in KVM by implementing
> the support in backing_size().
>
> @@ -490,18 +492,30 @@ static enum kvm_page_size host_page_size(struct kvm *kvm, gfn_t gfn)
>  static enum kvm_page_size backing_size(struct kvm_vcpu *vcpu, gfn_t gfn)
>  {
>  	struct kvm_memory_slot *slot;
> -
> -	if (has_wrprotected_page(vcpu->kvm, gfn))
> -		return KVM_PAGE_SIZE_4k;
> -
> -	if (host_page_size(vcpu->kvm, gfn) < KVM_PAGE_SIZE_2M)
> -		return KVM_PAGE_SIZE_4k;
> +	enum kvm_page_size host_size, ret;
>  
>  	slot = gfn_to_memslot(vcpu->kvm, gfn);
>  	if (slot && slot->dirty_bitmap)
>  		return KVM_PAGE_SIZE_4k;
>  
> -	return KVM_PAGE_SIZE_2M;
> +	host_size = host_page_size(vcpu->kvm, gfn);
> +
> +	switch (host_size) {
> +	case KVM_PAGE_SIZE_1G:
> +		if (!has_wrprotected_largepage(vcpu->kvm, gfn)) {
> +			ret = KVM_PAGE_SIZE_1G;
> +			break;
> +		}
>   

What if there's a wrprotected_page in there?

> +	case KVM_PAGE_SIZE_2M:
> +		if (!has_wrprotected_page(vcpu->kvm, gfn)) {
> +			ret = KVM_PAGE_SIZE_2M;
> +			break;
> +		}
> +	default:
> +		ret = KVM_PAGE_SIZE_4k;
> +	}
> +
> +	return ret;
>  }
>  
>  /*
>
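A minimal sketch of the case Avi's question points at, not part of the posted patch: the 1G branch consults only has_wrprotected_largepage(), so a write-protected 4k page inside the 1 GiB frame -- whose write count is tracked at the 2M level by has_wrprotected_page() -- would not block the 1G mapping. One illustrative way to cover it, assuming the kvm_page_size values are byte sizes and gfns are 4k frame numbers (the helper below is hypothetical):

/*
 * Illustrative only -- not from the patch.  Returns true if any 2M frame
 * inside the 1 GiB frame containing @gfn has a write-protected page, in
 * which case the caller would have to fall back to a smaller backing size.
 */
static bool gfn_1g_has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
{
	gfn_t gfns_per_2m = KVM_PAGE_SIZE_2M / KVM_PAGE_SIZE_4k;
	gfn_t gfns_per_1g = KVM_PAGE_SIZE_1G / KVM_PAGE_SIZE_4k;
	gfn_t base = gfn & ~(gfns_per_1g - 1);
	gfn_t offset;

	for (offset = 0; offset < gfns_per_1g; offset += gfns_per_2m)
		if (has_wrprotected_page(kvm, base + offset))
			return true;

	return false;
}

The KVM_PAGE_SIZE_1G case would then have to pass a check like this, in addition to has_wrprotected_largepage(), before returning a 1G backing.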

Patch

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3f5e20b..471e5d0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -479,7 +479,9 @@  static enum kvm_page_size host_page_size(struct kvm *kvm, gfn_t gfn)
 	vma = find_vma(current->mm, addr);
 	if (vma) {
 		size = vma_kernel_pagesize(vma);
-		if (size >= KVM_PAGE_SIZE_2M)
+		if (size == KVM_PAGE_SIZE_1G)
+			ret = KVM_PAGE_SIZE_1G;
+		else if (size >= KVM_PAGE_SIZE_2M)
 			ret = KVM_PAGE_SIZE_2M;
 	}
 	up_read(&current->mm->mmap_sem);
@@ -490,18 +492,30 @@  static enum kvm_page_size host_page_size(struct kvm *kvm, gfn_t gfn)
 static enum kvm_page_size backing_size(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
 	struct kvm_memory_slot *slot;
-
-	if (has_wrprotected_page(vcpu->kvm, gfn))
-		return KVM_PAGE_SIZE_4k;
-
-	if (host_page_size(vcpu->kvm, gfn) < KVM_PAGE_SIZE_2M)
-		return KVM_PAGE_SIZE_4k;
+	enum kvm_page_size host_size, ret;
 
 	slot = gfn_to_memslot(vcpu->kvm, gfn);
 	if (slot && slot->dirty_bitmap)
 		return KVM_PAGE_SIZE_4k;
 
-	return KVM_PAGE_SIZE_2M;
+	host_size = host_page_size(vcpu->kvm, gfn);
+
+	switch (host_size) {
+	case KVM_PAGE_SIZE_1G:
+		if (!has_wrprotected_largepage(vcpu->kvm, gfn)) {
+			ret = KVM_PAGE_SIZE_1G;
+			break;
+		}
+	case KVM_PAGE_SIZE_2M:
+		if (!has_wrprotected_page(vcpu->kvm, gfn)) {
+			ret = KVM_PAGE_SIZE_2M;
+			break;
+		}
+	default:
+		ret = KVM_PAGE_SIZE_4k;
+	}
+
+	return ret;
 }
 
 /*