
[1/2] KVM: x86: replace KVM_PAGES_PER_HPAGE with KVM_HPAGE_GFN_SIZE

Message ID 20180922015639.12666-1-richard.weiyang@gmail.com (mailing list archive)
State New, archived
Series [1/2] KVM: x86: replace KVM_PAGES_PER_HPAGE with KVM_HPAGE_GFN_SIZE

Commit Message

Wei Yang Sept. 22, 2018, 1:56 a.m. UTC
KVM_PAGES_PER_HPAGE is computed by shifting left by (KVM_HPAGE_GFN_SHIFT +
PAGE_SHIFT) and then dividing by PAGE_SIZE, which can be simplified to a
single left shift by KVM_HPAGE_GFN_SHIFT.

At the same time, in almost 40% of the places where KVM_PAGES_PER_HPAGE is
used, a pfn mask is actually what is needed rather than the number of pages.

This patch replaces KVM_PAGES_PER_HPAGE with KVM_HPAGE_GFN_SIZE and
introduces KVM_HPAGE_GFN_MASK to make the code a little easier to read.

Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
---
 arch/x86/include/asm/kvm_host.h |  3 ++-
 arch/x86/kvm/mmu.c              | 10 +++++-----
 arch/x86/kvm/paging_tmpl.h      |  6 +++---
 arch/x86/kvm/x86.c              |  6 +++---
 4 files changed, 13 insertions(+), 12 deletions(-)
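
For illustration, here is a minimal standalone sketch of the equivalence the
commit message describes, built from the macro definitions in kvm_host.h. The
PAGE_SHIFT value of 12 and the main() harness are assumptions for the example
only, not part of the patch:

/*
 * Sketch: KVM_PAGES_PER_HPAGE(x) == KVM_HPAGE_GFN_SIZE(x), because
 * (1UL << (PAGE_SHIFT + s)) / (1UL << PAGE_SHIFT) == 1UL << s, and
 * gfn & ~(KVM_PAGES_PER_HPAGE(x) - 1) == gfn & KVM_HPAGE_GFN_MASK(x).
 * 4K base pages (PAGE_SHIFT == 12) are assumed for the example.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)

#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_HPAGE_GFN_SIZE(x)	(1UL << KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_GFN_MASK(x)	(~(KVM_HPAGE_GFN_SIZE(x) - 1))
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)

int main(void)
{
	unsigned long gfn = 0x12345;	/* arbitrary example gfn */
	int level;

	for (level = 1; level <= 3; level++) {
		/* the old and new forms yield the same page count ... */
		assert(KVM_PAGES_PER_HPAGE(level) == KVM_HPAGE_GFN_SIZE(level));
		/* ... and the same alignment mask */
		assert((gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1)) ==
		       (gfn & KVM_HPAGE_GFN_MASK(level)));
	}

	/* level 2 (2MB hugepages on 4K base pages) spans 512 gfns */
	printf("%lu\n", KVM_HPAGE_GFN_SIZE(2));
	return 0;
}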

Comments

Wei Yang Nov. 3, 2018, 1:17 p.m. UTC | #1
Paolo,

Do you like this one?

On Sat, Sep 22, 2018 at 09:56:38AM +0800, Wei Yang wrote:
>KVM_PAGES_PER_HPAGE is computed by shifting left by (KVM_HPAGE_GFN_SHIFT +
>PAGE_SHIFT) and then dividing by PAGE_SIZE, which can be simplified to a
>single left shift by KVM_HPAGE_GFN_SHIFT.
>
>At the same time, in almost 40% of the places where KVM_PAGES_PER_HPAGE is
>used, a pfn mask is actually what is needed rather than the number of pages.
>
>This patch replaces KVM_PAGES_PER_HPAGE with KVM_HPAGE_GFN_SIZE and
>introduces KVM_HPAGE_GFN_MASK to make the code a little easier to read.
>
>Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
>---
> arch/x86/include/asm/kvm_host.h |  3 ++-
> arch/x86/kvm/mmu.c              | 10 +++++-----
> arch/x86/kvm/paging_tmpl.h      |  6 +++---
> arch/x86/kvm/x86.c              |  6 +++---
> 4 files changed, 13 insertions(+), 12 deletions(-)
>
>diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>index f1a4e520ef5c..c5e7bb811b1e 100644
>--- a/arch/x86/include/asm/kvm_host.h
>+++ b/arch/x86/include/asm/kvm_host.h
>@@ -104,10 +104,11 @@
> /* KVM Hugepage definitions for x86 */
> #define KVM_NR_PAGE_SIZES	3
> #define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
>+#define KVM_HPAGE_GFN_SIZE(x)	(1UL << KVM_HPAGE_GFN_SHIFT(x))
>+#define KVM_HPAGE_GFN_MASK(x)	(~(KVM_HPAGE_GFN_SIZE(x) - 1))
> #define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
> #define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
> #define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
>-#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
> 
> static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
> {
>diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
>index 0caaaa25e88b..897614414311 100644
>--- a/arch/x86/kvm/mmu.c
>+++ b/arch/x86/kvm/mmu.c
>@@ -3170,7 +3170,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
> 		 * head.
> 		 */
> 		*levelp = level = PT_DIRECTORY_LEVEL;
>-		mask = KVM_PAGES_PER_HPAGE(level) - 1;
>+		mask = KVM_HPAGE_GFN_SIZE(level) - 1;
> 		VM_BUG_ON((gfn & mask) != (pfn & mask));
> 		if (pfn & mask) {
> 			gfn &= ~mask;
>@@ -3416,7 +3416,7 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
> 		if (level > PT_DIRECTORY_LEVEL)
> 			level = PT_DIRECTORY_LEVEL;
> 
>-		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
>+		gfn &= KVM_HPAGE_GFN_MASK(level);
> 	}
> 
> 	if (fast_page_fault(vcpu, v, level, error_code))
>@@ -4018,9 +4018,9 @@ EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
> static bool
> check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
> {
>-	int page_num = KVM_PAGES_PER_HPAGE(level);
>+	int page_num = KVM_HPAGE_GFN_SIZE(level);
> 
>-	gfn &= ~(page_num - 1);
>+	gfn &= KVM_HPAGE_GFN_MASK(level);
> 
> 	return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
> }
>@@ -4053,7 +4053,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
> 		if (level > PT_DIRECTORY_LEVEL &&
> 		    !check_hugepage_cache_consistency(vcpu, gfn, level))
> 			level = PT_DIRECTORY_LEVEL;
>-		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
>+		gfn &= KVM_HPAGE_GFN_MASK(level);
> 	}
> 
> 	if (fast_page_fault(vcpu, gpa, level, error_code))
>diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
>index 14ffd973df54..c8a242715cbb 100644
>--- a/arch/x86/kvm/paging_tmpl.h
>+++ b/arch/x86/kvm/paging_tmpl.h
>@@ -658,7 +658,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
> 		if (is_shadow_present_pte(*it.sptep))
> 			continue;
> 
>-		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
>+		direct_gfn = gw->gfn & KVM_HPAGE_GFN_MASK(it.level);
> 
> 		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
> 				      true, direct_access);
>@@ -700,7 +700,7 @@ FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
> 			      bool *write_fault_to_shadow_pgtable)
> {
> 	int level;
>-	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
>+	gfn_t mask = KVM_HPAGE_GFN_MASK(walker->level);
> 	bool self_changed = false;
> 
> 	if (!(walker->pte_access & ACC_WRITE_MASK ||
>@@ -786,7 +786,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
> 		level = mapping_level(vcpu, walker.gfn, &force_pt_level);
> 		if (likely(!force_pt_level)) {
> 			level = min(walker.level, level);
>-			walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
>+			walker.gfn = walker.gfn & KVM_HPAGE_GFN_MASK(level);
> 		}
> 	} else
> 		force_pt_level = true;
>diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>index f7dff0457846..70b4e5e74f7d 100644
>--- a/arch/x86/kvm/x86.c
>+++ b/arch/x86/kvm/x86.c
>@@ -9021,9 +9021,9 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
> 
> 		slot->arch.lpage_info[i - 1] = linfo;
> 
>-		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
>+		if (slot->base_gfn & (KVM_HPAGE_GFN_SIZE(level) - 1))
> 			linfo[0].disallow_lpage = 1;
>-		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
>+		if ((slot->base_gfn + npages) & (KVM_HPAGE_GFN_SIZE(level) - 1))
> 			linfo[lpages - 1].disallow_lpage = 1;
> 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
> 		/*
>@@ -9031,7 +9031,7 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
> 		 * other, or if explicitly asked to, disable large page
> 		 * support for this slot
> 		 */
>-		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
>+		if ((slot->base_gfn ^ ugfn) & (KVM_HPAGE_GFN_SIZE(level) - 1) ||
> 		    !kvm_largepages_enabled()) {
> 			unsigned long j;
> 
>-- 
>2.15.1

Patch

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f1a4e520ef5c..c5e7bb811b1e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -104,10 +104,11 @@ 
 /* KVM Hugepage definitions for x86 */
 #define KVM_NR_PAGE_SIZES	3
 #define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
+#define KVM_HPAGE_GFN_SIZE(x)	(1UL << KVM_HPAGE_GFN_SHIFT(x))
+#define KVM_HPAGE_GFN_MASK(x)	(~(KVM_HPAGE_GFN_SIZE(x) - 1))
 #define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
 #define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
 #define KVM_HPAGE_MASK(x)	(~(KVM_HPAGE_SIZE(x) - 1))
-#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
 
 static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0caaaa25e88b..897614414311 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3170,7 +3170,7 @@  static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
 		 * head.
 		 */
 		*levelp = level = PT_DIRECTORY_LEVEL;
-		mask = KVM_PAGES_PER_HPAGE(level) - 1;
+		mask = KVM_HPAGE_GFN_SIZE(level) - 1;
 		VM_BUG_ON((gfn & mask) != (pfn & mask));
 		if (pfn & mask) {
 			gfn &= ~mask;
@@ -3416,7 +3416,7 @@  static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, u32 error_code,
 		if (level > PT_DIRECTORY_LEVEL)
 			level = PT_DIRECTORY_LEVEL;
 
-		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+		gfn &= KVM_HPAGE_GFN_MASK(level);
 	}
 
 	if (fast_page_fault(vcpu, v, level, error_code))
@@ -4018,9 +4018,9 @@  EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
 static bool
 check_hugepage_cache_consistency(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
 {
-	int page_num = KVM_PAGES_PER_HPAGE(level);
+	int page_num = KVM_HPAGE_GFN_SIZE(level);
 
-	gfn &= ~(page_num - 1);
+	gfn &= KVM_HPAGE_GFN_MASK(level);
 
 	return kvm_mtrr_check_gfn_range_consistency(vcpu, gfn, page_num);
 }
@@ -4053,7 +4053,7 @@  static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
 		if (level > PT_DIRECTORY_LEVEL &&
 		    !check_hugepage_cache_consistency(vcpu, gfn, level))
 			level = PT_DIRECTORY_LEVEL;
-		gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
+		gfn &= KVM_HPAGE_GFN_MASK(level);
 	}
 
 	if (fast_page_fault(vcpu, gpa, level, error_code))
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 14ffd973df54..c8a242715cbb 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -658,7 +658,7 @@  static int FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 		if (is_shadow_present_pte(*it.sptep))
 			continue;
 
-		direct_gfn = gw->gfn & ~(KVM_PAGES_PER_HPAGE(it.level) - 1);
+		direct_gfn = gw->gfn & KVM_HPAGE_GFN_MASK(it.level);
 
 		sp = kvm_mmu_get_page(vcpu, direct_gfn, addr, it.level-1,
 				      true, direct_access);
@@ -700,7 +700,7 @@  FNAME(is_self_change_mapping)(struct kvm_vcpu *vcpu,
 			      bool *write_fault_to_shadow_pgtable)
 {
 	int level;
-	gfn_t mask = ~(KVM_PAGES_PER_HPAGE(walker->level) - 1);
+	gfn_t mask = KVM_HPAGE_GFN_MASK(walker->level);
 	bool self_changed = false;
 
 	if (!(walker->pte_access & ACC_WRITE_MASK ||
@@ -786,7 +786,7 @@  static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		level = mapping_level(vcpu, walker.gfn, &force_pt_level);
 		if (likely(!force_pt_level)) {
 			level = min(walker.level, level);
-			walker.gfn = walker.gfn & ~(KVM_PAGES_PER_HPAGE(level) - 1);
+			walker.gfn = walker.gfn & KVM_HPAGE_GFN_MASK(level);
 		}
 	} else
 		force_pt_level = true;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f7dff0457846..70b4e5e74f7d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9021,9 +9021,9 @@  int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 
 		slot->arch.lpage_info[i - 1] = linfo;
 
-		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
+		if (slot->base_gfn & (KVM_HPAGE_GFN_SIZE(level) - 1))
 			linfo[0].disallow_lpage = 1;
-		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
+		if ((slot->base_gfn + npages) & (KVM_HPAGE_GFN_SIZE(level) - 1))
 			linfo[lpages - 1].disallow_lpage = 1;
 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
 		/*
@@ -9031,7 +9031,7 @@  int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
 		 * other, or if explicitly asked to, disable large page
 		 * support for this slot
 		 */
-		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1) ||
+		if ((slot->base_gfn ^ ugfn) & (KVM_HPAGE_GFN_SIZE(level) - 1) ||
 		    !kvm_largepages_enabled()) {
 			unsigned long j;