
[v3] x86/shadow: Correct guest behaviour when creating PTEs above maxphysaddr

Message ID 1487259954-11248-1-git-send-email-andrew.cooper3@citrix.com (mailing list archive)
State New, archived

Commit Message

Andrew Cooper Feb. 16, 2017, 3:45 p.m. UTC
XSA-173 (c/s 8b1764833) introduces gfn_bits, and an upper limit which might be
lower than the real maxphysaddr, to avoid overflowing the superpage shadow
backpointer.

However, plenty of hardware has a physical address width less than 44 bits,
and the code added in shadow_domain_init() is a straight assignment.  This
causes gfn_bits to exceed the physical address width on most Intel consumer
hardware (typically a width of 39, which is the number reported to the guest
via CPUID).

If the guest intentionally creates a PTE referencing a physical address
between 39 and 44 bits, the result should be #PF[RSVD] when the virtual
address is used.  However, the shadow code accepts the PTE, shadows it, and
the virtual address works normally.
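
For reference, a minimal sketch of the architectural rule being violated
(illustrative only, not part of the patch; the names are hypothetical):
address bits from maxphysaddr up to bit 51 of a PTE are reserved, and a
translation through a PTE with any of them set should raise #PF[RSVD].

    /* Illustrative model, not Xen code. */
    #include <stdbool.h>
    #include <stdint.h>

    #define PADDR_BITS_MAX 52 /* architectural ceiling for 4-level paging */

    /* PTE address bits which are reserved for a given maxphysaddr. */
    static uint64_t rsvd_addr_mask(unsigned int maxphysaddr)
    {
        return ((1ULL << PADDR_BITS_MAX) - 1) & ~((1ULL << maxphysaddr) - 1);
    }

    /* A walk through this PTE should raise #PF[RSVD] if this returns true. */
    static bool pte_has_rsvd_addr_bits(uint64_t pte, unsigned int maxphysaddr)
    {
        return (pte & rsvd_addr_mask(maxphysaddr)) != 0;
    }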

Introduce paging_max_paddr_bits() to calculate the largest guest physical
address supportable by the paging infrastructure, and update
recalculate_cpuid_policy() to take this into account when clamping the
guest's cpuid_policy to reality.
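
A worked example of the resulting clamp (values are illustrative; the real
definitions are in the hunks below): on a !CONFIG_BIGMEM build, a shadow
guest on a host with a 46-bit physical address width is capped at
min(46, 32 + PAGE_SHIFT) = 44 by the paging layer, and
recalculate_cpuid_policy() then clamps further to the CPUID-reported width.

    /* Standalone model of the clamping chain; illustrative values only. */
    #include <stdio.h>

    #define PAGE_SHIFT 12

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;
    }

    int main(void)
    {
        unsigned int host_paddr_bits = 46; /* example host width */
        unsigned int cpuid_width = 39;     /* typical consumer Intel part */

        /* Shadow guests on !CONFIG_BIGMEM builds: superpage shadow
         * back-pointers hold 32-bit GFNs, capping at 32 + PAGE_SHIFT. */
        unsigned int paging_max = min_u(host_paddr_bits, 32 + PAGE_SHIFT);

        /* recalculate_cpuid_policy() takes the minimum of the two. */
        printf("guest maxphysaddr = %u\n", min_u(cpuid_width, paging_max));
        return 0;
    }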

There is an existing valid_gfn() helper in guest_pt.h, but it is unused in
the codebase.  Repurpose it as gfn_valid(), performing a guest-specific
maxphysaddr check which replaces the users of gfn_bits.
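
To make the repurposed predicate concrete, a standalone model of the shift
test (the real, type-safe definition is in the paging.h hunk below): with a
guest maxphysaddr of 39 and PAGE_SHIFT of 12, a GFN must fit in 27 bits.

    /* Model of the gfn_valid() arithmetic; illustrative only. */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    static bool model_gfn_valid(uint64_t gfn, unsigned int maxphysaddr)
    {
        return !(gfn >> (maxphysaddr - PAGE_SHIFT));
    }

    int main(void)
    {
        assert(model_gfn_valid((1ULL << 27) - 1, 39)); /* last valid GFN */
        assert(!model_gfn_valid(1ULL << 27, 39));      /* first invalid GFN */
        return 0;
    }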

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
CC: Jan Beulich <JBeulich@suse.com>
CC: Tim Deegan <tim@xen.org>
CC: George Dunlap <george.dunlap@eu.citrix.com>
CC: Jun Nakajima <jun.nakajima@intel.com>
CC: Kevin Tian <kevin.tian@intel.com>

v3:
 * Retain pse36 maxphysaddr logic.
 * Repurpose gfn_valid().

v2:
 * Introduce paging_max_paddr_bits() rather than moving paging logic into
   recalculate_cpuid_policy().
 * Rewrite half of the commit message.
---
 xen/arch/x86/cpuid.c            |  3 ++-
 xen/arch/x86/hvm/vmx/vvmx.c     |  3 +--
 xen/arch/x86/mm/guest_walk.c    |  3 +--
 xen/arch/x86/mm/hap/hap.c       |  2 --
 xen/arch/x86/mm/p2m.c           |  2 +-
 xen/arch/x86/mm/shadow/common.c | 10 ----------
 xen/arch/x86/mm/shadow/multi.c  |  2 +-
 xen/include/asm-x86/domain.h    |  3 ---
 xen/include/asm-x86/guest_pt.h  |  6 ------
 xen/include/asm-x86/paging.h    | 21 +++++++++++++++++++++
 10 files changed, 27 insertions(+), 28 deletions(-)

Comments

Jan Beulich Feb. 16, 2017, 4:47 p.m. UTC | #1
>>> On 16.02.17 at 16:45, <andrew.cooper3@citrix.com> wrote:
> XSA-173 (c/s 8b1764833) introduces gfn_bits, and an upper limit which might be
> lower than the real maxphysaddr, to avoid overflowing the superpage shadow
> backpointer.
> [...]
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Jan Beulich <jbeulich@suse.com>
George Dunlap Feb. 16, 2017, 5:32 p.m. UTC | #2
On 16/02/17 15:45, Andrew Cooper wrote:
> XSA-173 (c/s 8b1764833) introduces gfn_bits, and an upper limit which might be
> lower than the real maxphysaddr, to avoid overflowing the superpage shadow
> backpointer.
> [...]
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Acked-by: George Dunlap <george.dunlap@citrix.com>

Tian, Kevin Feb. 17, 2017, 3:23 a.m. UTC | #3
> From: Andrew Cooper [mailto:andrew.cooper3@citrix.com]
> Sent: Thursday, February 16, 2017 11:46 PM
> 
> XSA-173 (c/s 8b1764833) introduces gfn_bits, and an upper limit which might be
> lower than the real maxphysaddr, to avoid overflowing the superpage shadow
> backpointer.
> [...]
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Tim Deegan Feb. 20, 2017, 10:45 a.m. UTC | #4
At 15:45 +0000 on 16 Feb (1487259954), Andrew Cooper wrote:
> XSA-173 (c/s 8b1764833) introduces gfn_bits, and an upper limit which might be
> lower than the real maxphysaddr, to avoid overflowing the superpage shadow
> backpointer.
> [...]
> Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>

Reviewed-by: Tim Deegan <tim@xen.org>

Patch

diff --git a/xen/arch/x86/cpuid.c b/xen/arch/x86/cpuid.c
index e0a387e..07d24da 100644
--- a/xen/arch/x86/cpuid.c
+++ b/xen/arch/x86/cpuid.c
@@ -6,6 +6,7 @@ 
 #include <asm/hvm/nestedhvm.h>
 #include <asm/hvm/svm/svm.h>
 #include <asm/hvm/vmx/vmcs.h>
+#include <asm/paging.h>
 #include <asm/processor.h>
 #include <asm/xstate.h>
 
@@ -504,7 +505,7 @@  void recalculate_cpuid_policy(struct domain *d)
 
     p->extd.maxphysaddr = min(p->extd.maxphysaddr, max->extd.maxphysaddr);
     p->extd.maxphysaddr = min_t(uint8_t, p->extd.maxphysaddr,
-                                d->arch.paging.gfn_bits + PAGE_SHIFT);
+                                paging_max_paddr_bits(d));
     p->extd.maxphysaddr = max_t(uint8_t, p->extd.maxphysaddr,
                                 (p->basic.pae || p->basic.pse36) ? 36 : 32);
 
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index f6a25a6..74775dd 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1420,8 +1420,7 @@  int nvmx_handle_vmxon(struct cpu_user_regs *regs)
         return X86EMUL_OKAY;
     }
 
-    if ( (gpa & ~PAGE_MASK) ||
-         (gpa >> (v->domain->arch.paging.gfn_bits + PAGE_SHIFT)) )
+    if ( (gpa & ~PAGE_MASK) || !gfn_valid(v->domain, _gfn(gpa >> PAGE_SHIFT)) )
     {
         vmfail_invalid(regs);
         return X86EMUL_OKAY;
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index a67fd5a..faaf70c 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -434,8 +434,7 @@  guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
 
     /* If this guest has a restricted physical address space then the
      * target GFN must fit within it. */
-    if ( !(rc & _PAGE_PRESENT)
-         && gfn_x(guest_l1e_get_gfn(gw->l1e)) >> d->arch.paging.gfn_bits )
+    if ( !(rc & _PAGE_PRESENT) && !gfn_valid(d, guest_l1e_get_gfn(gw->l1e)) )
         rc |= _PAGE_INVALID_BITS;
 
     return rc;
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index b5870bf..d7cd8da 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -446,8 +446,6 @@  void hap_domain_init(struct domain *d)
 {
     INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
 
-    d->arch.paging.gfn_bits = hap_paddr_bits - PAGE_SHIFT;
-
     /* Use HAP logdirty mechanism. */
     paging_log_dirty_init(d, hap_enable_log_dirty,
                           hap_disable_log_dirty,
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 0c1820e..cf3d6b0 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1784,7 +1784,7 @@  void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
 {
     struct page_info *page;
 
-    if ( gfn_x(gfn) >> p2m->domain->arch.paging.gfn_bits )
+    if ( !gfn_valid(p2m->domain, gfn) )
     {
         *rc = _PAGE_INVALID_BIT;
         return NULL;
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index 1c9d9b9..51d6bdf 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -51,16 +51,6 @@  int shadow_domain_init(struct domain *d, unsigned int domcr_flags)
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
 
-    d->arch.paging.gfn_bits = paddr_bits - PAGE_SHIFT;
-#ifndef CONFIG_BIGMEM
-    /*
-     * Shadowed superpages store GFNs in 32-bit page_info fields.
-     * Note that we cannot use guest_supports_superpages() here.
-     */
-    if ( !is_pv_domain(d) || opt_allow_superpage )
-        d->arch.paging.gfn_bits = 32;
-#endif
-
     /* Use shadow pagetables for log-dirty support */
     paging_log_dirty_init(d, sh_enable_log_dirty,
                           sh_disable_log_dirty, sh_clean_dirty_bitmap);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index d4090d7..128809d 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -537,7 +537,7 @@  _sh_propagate(struct vcpu *v,
 
     /* Check there's something for the shadows to map to */
     if ( (!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt))
-         || gfn_x(target_gfn) >> d->arch.paging.gfn_bits )
+         || !gfn_valid(d, target_gfn) )
     {
         *sp = shadow_l1e_empty();
         goto done;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 9bb070f..ee5f2d3 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -195,9 +195,6 @@  struct paging_domain {
     /* log dirty support */
     struct log_dirty_domain log_dirty;
 
-    /* Number of valid bits in a gfn. */
-    unsigned int gfn_bits;
-
     /* preemption handling */
     struct {
         const struct domain *dom;
diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h
index 3ec9ace..0bf6cf9 100644
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -34,12 +34,6 @@ 
 
 #define VALID_GFN(m) (m != gfn_x(INVALID_GFN))
 
-static inline int
-valid_gfn(gfn_t m)
-{
-    return VALID_GFN(gfn_x(m));
-}
-
 static inline paddr_t
 gfn_to_paddr(gfn_t gfn)
 {
diff --git a/xen/include/asm-x86/paging.h b/xen/include/asm-x86/paging.h
index cec6bfd..51808fc 100644
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -360,6 +360,27 @@  void paging_dump_vcpu_info(struct vcpu *v);
 int paging_set_allocation(struct domain *d, unsigned int pages,
                           bool *preempted);
 
+/* Is gfn within maxphysaddr for the domain? */
+static inline bool gfn_valid(const struct domain *d, gfn_t gfn)
+{
+    return !(gfn_x(gfn) >> (d->arch.cpuid->extd.maxphysaddr - PAGE_SHIFT));
+}
+
+/* Maxphysaddr supportable by the paging infrastructure. */
+static inline unsigned int paging_max_paddr_bits(const struct domain *d)
+{
+    unsigned int bits = paging_mode_hap(d) ? hap_paddr_bits : paddr_bits;
+
+    if ( !IS_ENABLED(CONFIG_BIGMEM) && paging_mode_shadow(d) &&
+         (!is_pv_domain(d) || opt_allow_superpage) )
+    {
+        /* Shadowed superpages store GFNs in 32-bit page_info fields. */
+        bits = min(bits, 32U + PAGE_SHIFT);
+    }
+
+    return bits;
+}
+
 #endif /* XEN_PAGING_H */
 
 /*