
[3/3] KVM: x86/mmu: Use hugepage GFN mask to compute GFN offset mask

Message ID 20201027214300.1342-4-sean.j.christopherson@intel.com (mailing list archive)
State New, archived
Series KVM: x86/mmu: Add macro for hugepage GFN mask

Commit Message

Sean Christopherson Oct. 27, 2020, 9:43 p.m. UTC
Use the logical NOT of KVM_HPAGE_GFN_MASK() to compute the GFN offset
mask instead of open coding the equivalent in a variety of locations.
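To make the equivalence concrete, here is a minimal userspace sketch. The
KVM_HPAGE_GFN_SHIFT()/KVM_PAGES_PER_HPAGE() stand-ins follow the existing
definitions in arch/x86/include/asm/kvm_host.h; the KVM_HPAGE_GFN_MASK()
definition is an assumption about the macro added earlier in this series
(the complement of the per-hugepage GFN offset mask):

#include <stdio.h>

/*
 * Stand-ins for the kernel definitions; on x86, PAGE_SHIFT == 12 and
 * KVM_PAGES_PER_HPAGE(x) reduces to 1ULL << KVM_HPAGE_GFN_SHIFT(x).
 */
#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
#define KVM_PAGES_PER_HPAGE(x)	(1ULL << KVM_HPAGE_GFN_SHIFT(x))

/* Assumed definition of the macro added earlier in this series. */
#define KVM_HPAGE_GFN_MASK(x)	(~(KVM_PAGES_PER_HPAGE(x) - 1))

int main(void)
{
	/* PG_LEVEL_4K == 1, PG_LEVEL_2M == 2, PG_LEVEL_1G == 3 */
	for (int level = 1; level <= 3; level++) {
		unsigned long long open_coded = KVM_PAGES_PER_HPAGE(level) - 1;
		unsigned long long via_mask = ~KVM_HPAGE_GFN_MASK(level);

		printf("level %d: open-coded %#llx, via mask %#llx (%s)\n",
		       level, open_coded, via_mask,
		       open_coded == via_mask ? "match" : "MISMATCH");
	}
	return 0;
}

Since KVM_PAGES_PER_HPAGE(level) is a power of two, ~KVM_HPAGE_GFN_MASK(level)
and KVM_PAGES_PER_HPAGE(level) - 1 are bit-for-bit identical, so each
substitution below is purely cosmetic.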

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kvm/mmu/mmu.c      | 2 +-
 arch/x86/kvm/mmu/mmutrace.h | 2 +-
 arch/x86/kvm/mmu/tdp_mmu.c  | 2 +-
 arch/x86/kvm/x86.c          | 6 +++---
 4 files changed, 6 insertions(+), 6 deletions(-)

Comments

Ben Gardon Oct. 27, 2020, 10:09 p.m. UTC | #1
On Tue, Oct 27, 2020 at 2:43 PM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> Use the logical NOT of KVM_HPAGE_GFN_MASK() to compute the GFN offset
> mask instead of open coding the equivalent in a variety of locations.

I don't see a "no functional change expected" note on this patch as
there was on the previous one, but I don't think this represents any
functional change.

>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>

Reviewed-by: Ben Gardon <bgardon@google.com>

Sean Christopherson Oct. 27, 2020, 10:15 p.m. UTC | #2
On Tue, Oct 27, 2020 at 03:09:11PM -0700, Ben Gardon wrote:
> On Tue, Oct 27, 2020 at 2:43 PM Sean Christopherson
> <sean.j.christopherson@intel.com> wrote:
> >
> > Use the logical NOT of KVM_HPAGE_GFN_MASK() to compute the GFN offset
> > mask instead of open coding the equivalent in a variety of locations.
> 
> I don't see a "no functional change expected" note on this patch as
> there was on the previous one, but I don't think this represents any
> functional change.

Ah, yeah, I meant to call out in the cover letter that nothing in this series
generates a functional difference, e.g. objdump of kvm/kvm-intel is identical
from start to finish.
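
For anyone who wants to reproduce that check, a rough sketch of the
comparison; the module paths and a modular (CONFIG_KVM=m, CONFIG_KVM_INTEL=m)
x86-64 build are assumptions:

# Build at the parent commit of the series, then save the disassembly.
objdump -d arch/x86/kvm/kvm.ko       > /tmp/kvm.before
objdump -d arch/x86/kvm/kvm-intel.ko > /tmp/kvm-intel.before

# Apply the series and rebuild with the same toolchain and .config.

objdump -d arch/x86/kvm/kvm.ko       > /tmp/kvm.after
objdump -d arch/x86/kvm/kvm-intel.ko > /tmp/kvm-intel.after

# No functional change: both diffs should produce no output.
diff -u /tmp/kvm.before /tmp/kvm.after
diff -u /tmp/kvm-intel.before /tmp/kvm-intel.after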

Patch

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3bfc7ee44e51..9fb50c666ec5 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2827,7 +2827,7 @@  int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
 	 * mmu_notifier_retry() was successful and mmu_lock is held, so
 	 * the pmd can't be split from under us.
 	 */
-	mask = KVM_PAGES_PER_HPAGE(level) - 1;
+	mask = ~KVM_HPAGE_GFN_MASK(level);
 	VM_BUG_ON((gfn & mask) != (pfn & mask));
 	*pfnp = pfn & ~mask;
 
diff --git a/arch/x86/kvm/mmu/mmutrace.h b/arch/x86/kvm/mmu/mmutrace.h
index 213699b27b44..4432ca3c7e4e 100644
--- a/arch/x86/kvm/mmu/mmutrace.h
+++ b/arch/x86/kvm/mmu/mmutrace.h
@@ -372,7 +372,7 @@  TRACE_EVENT(
 
 	TP_fast_assign(
 		__entry->gfn = addr >> PAGE_SHIFT;
-		__entry->pfn = pfn | (__entry->gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
+		__entry->pfn = pfn | (__entry->gfn & ~KVM_HPAGE_GFN_MASK(level));
 		__entry->level = level;
 	),
 
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 27e381c9da6c..681686608c0b 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -209,7 +209,7 @@  static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
 
 	WARN_ON(level > PT64_ROOT_MAX_LEVEL);
 	WARN_ON(level < PG_LEVEL_4K);
-	WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
+	WARN_ON(gfn & ~KVM_HPAGE_GFN_MASK(level));
 
 	/*
 	 * If this warning were to trigger it would indicate that there was a
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 397f599b20e5..faf4c4ddde94 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -10451,16 +10451,16 @@  static int kvm_alloc_memslot_metadata(struct kvm_memory_slot *slot,
 
 		slot->arch.lpage_info[i - 1] = linfo;
 
-		if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
+		if (slot->base_gfn & ~KVM_HPAGE_GFN_MASK(level))
 			linfo[0].disallow_lpage = 1;
-		if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
+		if ((slot->base_gfn + npages) & ~KVM_HPAGE_GFN_MASK(level))
 			linfo[lpages - 1].disallow_lpage = 1;
 		ugfn = slot->userspace_addr >> PAGE_SHIFT;
 		/*
 		 * If the gfn and userspace address are not aligned wrt each
 		 * other, disable large page support for this slot.
 		 */
-		if ((slot->base_gfn ^ ugfn) & (KVM_PAGES_PER_HPAGE(level) - 1)) {
+		if ((slot->base_gfn ^ ugfn) & ~KVM_HPAGE_GFN_MASK(level)) {
 			unsigned long j;
 
 			for (j = 0; j < lpages; ++j)