
[04/17] xen: Convert virt_to_mfn() and mfn_to_virt() to use typesafe MFN

Message ID 20200322161418.31606-5-julien@xen.org (mailing list archive)
State New, archived
Series Bunch of typesafe conversion

Commit Message

Julien Grall March 22, 2020, 4:14 p.m. UTC
From: Julien Grall <jgrall@amazon.com>

Most of Xen now either overrides the helpers virt_to_mfn() and
mfn_to_virt() to use typesafe MFN, or uses mfn_x() to remove the
typesafety when calling the helpers.

Therefore it is time to switch the two helpers to use typesafe MFN and
completely remove the possibility of making them unsafe, by dropping
the double-underscore versions.

Places that were still using non-typesafe MFN have either been
converted to use the typesafe version (where the changes are simple) or
now use _mfn()/mfn_x() until the rest of the code is converted.
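
For context, mfn_t and the _mfn()/mfn_x() converters come from Xen's
TYPE_SAFE machinery. A rough sketch of what it expands to in debug
builds (illustrative, not the literal definitions):

    typedef struct { unsigned long mfn; } mfn_t;

    static inline mfn_t _mfn(unsigned long n) { return (mfn_t){ n }; }
    static inline unsigned long mfn_x(mfn_t m) { return m.mfn; }

Because mfn_t is a struct here, accidentally mixing it with a plain
unsigned long MFN fails to compile, which is what makes the conversion
"typesafe".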

There are a couple of noticeable changes in the code:
    - pvh_populate_p2m() was storing the MFN in a variable called
      'addr'. This has now been renamed to 'mfn'.
    - allocate_cachealigned_memnodemap() was storing an address in a
      variable called 'mfn'. The code has been reworked to avoid
      repurposing the variable.

No functional changes intended.

Signed-off-by: Julien Grall <jgrall@amazon.com>
---
 xen/arch/arm/acpi/domain_build.c    |  4 ----
 xen/arch/arm/alternative.c          |  4 ----
 xen/arch/arm/cpuerrata.c            |  4 ----
 xen/arch/arm/domain_build.c         |  4 ----
 xen/arch/arm/livepatch.c            |  4 ----
 xen/arch/arm/mm.c                   |  8 +-------
 xen/arch/x86/domain_page.c          | 10 +++++-----
 xen/arch/x86/hvm/dom0_build.c       | 20 ++++++++++---------
 xen/arch/x86/mm.c                   | 30 +++++++++++++----------------
 xen/arch/x86/numa.c                 |  8 +++-----
 xen/arch/x86/pv/descriptor-tables.c |  2 +-
 xen/arch/x86/pv/dom0_build.c        |  4 ++--
 xen/arch/x86/pv/shim.c              |  3 ---
 xen/arch/x86/setup.c                | 10 +++++-----
 xen/arch/x86/smpboot.c              |  4 ++--
 xen/arch/x86/srat.c                 |  2 +-
 xen/arch/x86/tboot.c                |  4 ++--
 xen/arch/x86/traps.c                |  4 ++--
 xen/arch/x86/x86_64/mm.c            | 13 +++++++------
 xen/common/domctl.c                 |  3 ++-
 xen/common/efi/boot.c               |  7 ++++---
 xen/common/grant_table.c            |  8 ++++----
 xen/common/page_alloc.c             | 18 ++++++++---------
 xen/common/trace.c                  | 19 +++++++++---------
 xen/common/xenoprof.c               |  4 ----
 xen/drivers/acpi/osl.c              |  2 +-
 xen/include/asm-arm/mm.h            | 14 +++-----------
 xen/include/asm-x86/grant_table.h   |  4 ++--
 xen/include/asm-x86/mm.h            |  2 +-
 xen/include/asm-x86/page.h          |  6 ++----
 xen/include/xen/domain_page.h       |  6 +++---
 31 files changed, 96 insertions(+), 139 deletions(-)

Comments

Jan Beulich March 25, 2020, 3:27 p.m. UTC | #1
On 22.03.2020 17:14, julien@xen.org wrote:
> @@ -785,21 +781,21 @@ bool is_iomem_page(mfn_t mfn)
>      return (page_get_owner(page) == dom_io);
>  }
>  
> -static int update_xen_mappings(unsigned long mfn, unsigned int cacheattr)
> +static int update_xen_mappings(mfn_t mfn, unsigned int cacheattr)
>  {
>      int err = 0;
> -    bool alias = mfn >= PFN_DOWN(xen_phys_start) &&
> -         mfn < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START);
> +    bool alias = mfn_x(mfn) >= PFN_DOWN(xen_phys_start) &&
> +         mfn_x(mfn) < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START);
>      unsigned long xen_va =
> -        XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
> +        XEN_VIRT_START + mfn_to_maddr(mfn_add(mfn, -PFN_DOWN(xen_phys_start)));

Depending on the types involved (e.g. in PFN_DOWN()) this may
or may not be safe, so I consider such a transformation at
least fragile. I think we either want to gain mfn_sub() or
keep this as a "real" subtraction.

> @@ -584,21 +584,21 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
>          needed = 0;
>      }
>      else if ( *use_tail && nr >= needed &&
> -              arch_mfn_in_directmap(mfn + nr) &&
> +              arch_mfn_in_directmap(mfn_x(mfn_add(mfn, nr))) &&
>                (!xenheap_bits ||
> -               !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
> +               !((mfn_x(mfn) + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )

May I suggest consistency here: This one uses +, while ...

>      {
> -        _heap[node] = mfn_to_virt(mfn + nr - needed);
> -        avail[node] = mfn_to_virt(mfn + nr - 1) +
> +        _heap[node] = mfn_to_virt(mfn_add(mfn, nr - needed));
> +        avail[node] = mfn_to_virt(mfn_add(mfn, nr - 1)) +
>                        PAGE_SIZE - sizeof(**avail) * NR_ZONES;
>      }
>      else if ( nr >= needed &&
> -              arch_mfn_in_directmap(mfn + needed) &&
> +              arch_mfn_in_directmap(mfn_x(mfn_add(mfn, needed))) &&

... this one uses mfn_add() despite the mfn_x() around it, and ...

>                (!xenheap_bits ||
> -               !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
> +               !((mfn_x(mfn) + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )

... here you use + again. My personal preference would be to avoid
constructs like mfn_x(mfn_add()).

> @@ -269,10 +270,10 @@ out_dealloc:
>              continue;
>          for ( i = 0; i < pages; i++ )
>          {
> -            uint32_t mfn = t_info_mfn_list[offset + i];
> -            if ( !mfn )
> +            mfn_t mfn = _mfn(t_info_mfn_list[offset + i]);
> +            if ( mfn_eq(mfn, _mfn(0)) )

Please could you take the opportunity and add the missing blank line
between these two?

> --- a/xen/include/asm-x86/mm.h
> +++ b/xen/include/asm-x86/mm.h
> @@ -667,7 +667,7 @@ static inline bool arch_mfn_in_directmap(unsigned long mfn)
>  {
>      unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
>  
> -    return mfn <= (virt_to_mfn(eva - 1) + 1);
> +    return mfn <= mfn_x(mfn_add(virt_to_mfn(eva - 1),  1));

Even if you wanted to stick to using mfn_add() here, there's one
blank too many after the comma.

With these taken care of
Reviewed-by: Jan Beulich <jbeulich@suse.com>

Jan
Julien Grall March 25, 2020, 6:21 p.m. UTC | #2
Hi Jan,

On 25/03/2020 15:27, Jan Beulich wrote:
> On 22.03.2020 17:14, julien@xen.org wrote:
>> @@ -785,21 +781,21 @@ bool is_iomem_page(mfn_t mfn)
>>       return (page_get_owner(page) == dom_io);
>>   }
>>   
>> -static int update_xen_mappings(unsigned long mfn, unsigned int cacheattr)
>> +static int update_xen_mappings(mfn_t mfn, unsigned int cacheattr)
>>   {
>>       int err = 0;
>> -    bool alias = mfn >= PFN_DOWN(xen_phys_start) &&
>> -         mfn < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START);
>> +    bool alias = mfn_x(mfn) >= PFN_DOWN(xen_phys_start) &&
>> +         mfn_x(mfn) < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START);
>>       unsigned long xen_va =
>> -        XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
>> +        XEN_VIRT_START + mfn_to_maddr(mfn_add(mfn, -PFN_DOWN(xen_phys_start)));
> 
> Depending on the types involved (e.g. in PFN_DOWN()) this may
> or may not be safe, so I consider such a transformation at
> least fragile. I think we either want to gain mfn_sub() or
> keep this as a "real" subtraction.
I want to avoid mfn_x() as much as possible when everything can be done
using typesafe operations. But I am not sure how mfn_sub() would solve
the problem. Do you mind providing more information?

> 
>> @@ -584,21 +584,21 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
>>           needed = 0;
>>       }
>>       else if ( *use_tail && nr >= needed &&
>> -              arch_mfn_in_directmap(mfn + nr) &&
>> +              arch_mfn_in_directmap(mfn_x(mfn_add(mfn, nr))) &&
>>                 (!xenheap_bits ||
>> -               !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
>> +               !((mfn_x(mfn) + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
> 
> May I suggest consistency here: This one uses +, while ...
> 
>>       {
>> -        _heap[node] = mfn_to_virt(mfn + nr - needed);
>> -        avail[node] = mfn_to_virt(mfn + nr - 1) +
>> +        _heap[node] = mfn_to_virt(mfn_add(mfn, nr - needed));
>> +        avail[node] = mfn_to_virt(mfn_add(mfn, nr - 1)) +
>>                         PAGE_SIZE - sizeof(**avail) * NR_ZONES;
>>       }
>>       else if ( nr >= needed &&
>> -              arch_mfn_in_directmap(mfn + needed) &&
>> +              arch_mfn_in_directmap(mfn_x(mfn_add(mfn, needed))) &&
> 
> ... this one uses mfn_add() despite the mfn_x() around it, and ...

So the reason I used mfn_x(mfn_add(mfn, needed)) here is that I plan to
convert arch_mfn_in_directmap() to use typesafe MFN soon. In the two
other cases...

>>                 (!xenheap_bits ||
>> -               !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
>> +               !((mfn_x(mfn) + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
> 
> ... here you use + again. My personal preference would be to avoid
> constructs like mfn_x(mfn_add()).

... I am still unsure how to avoid mfn_x(). Do you have any ideas?
> 
>> @@ -269,10 +270,10 @@ out_dealloc:
>>               continue;
>>           for ( i = 0; i < pages; i++ )
>>           {
>> -            uint32_t mfn = t_info_mfn_list[offset + i];
>> -            if ( !mfn )
>> +            mfn_t mfn = _mfn(t_info_mfn_list[offset + i]);
>> +            if ( mfn_eq(mfn, _mfn(0)) )
> 
> Please could you take the opportunity and add the missing blank line
> between these two?

Sure.

> 
>> --- a/xen/include/asm-x86/mm.h
>> +++ b/xen/include/asm-x86/mm.h
>> @@ -667,7 +667,7 @@ static inline bool arch_mfn_in_directmap(unsigned long mfn)
>>   {
>>       unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
>>   
>> -    return mfn <= (virt_to_mfn(eva - 1) + 1);
>> +    return mfn <= mfn_x(mfn_add(virt_to_mfn(eva - 1),  1));
> 
> Even if you wanted to stick to using mfn_add() here, there's one
> blank too many after the comma.

I will remove the extra blank. Regarding the construction, I have been 
wondering for a couple of years now whether we should introduce mfn_{lt, 
gt}. What do you think?


> 
> With these taken care of
> Reviewed-by: Jan Beulich <jbeulich@suse.com>
Thank you for the review.

Cheers,
Jan Beulich March 26, 2020, 9:09 a.m. UTC | #3
On 25.03.2020 19:21, Julien Grall wrote:
> On 25/03/2020 15:27, Jan Beulich wrote:
>> On 22.03.2020 17:14, julien@xen.org wrote:
>>> @@ -785,21 +781,21 @@ bool is_iomem_page(mfn_t mfn)
>>>       return (page_get_owner(page) == dom_io);
>>>   }
>>>   -static int update_xen_mappings(unsigned long mfn, unsigned int cacheattr)
>>> +static int update_xen_mappings(mfn_t mfn, unsigned int cacheattr)
>>>   {
>>>       int err = 0;
>>> -    bool alias = mfn >= PFN_DOWN(xen_phys_start) &&
>>> -         mfn < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START);
>>> +    bool alias = mfn_x(mfn) >= PFN_DOWN(xen_phys_start) &&
>>> +         mfn_x(mfn) < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START);
>>>       unsigned long xen_va =
>>> -        XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
>>> +        XEN_VIRT_START + mfn_to_maddr(mfn_add(mfn, -PFN_DOWN(xen_phys_start)));
>>
>> Depending on the types involved (e.g. in PFN_DOWN()) this may
>> or may not be safe, so I consider such a transformation at
>> least fragile. I think we either want to gain mfn_sub() or
>> keep this as a "real" subtraction.
> I want to avoid mfn_x() as much as possible when everything can
> be done using typesafe operations. But I am not sure how
> mfn_sub() would solve the problem. Do you mind providing more
> information?

Consider PFN_DOWN() potentially returning "unsigned int". The
negation of an unsigned int is still an unsigned int, and hence
e.g. -1U (which might result here) is really 0xFFFFFFFF rather
than -1L / -1UL as intended. Whereas with mfn_sub() the
conversion to unsigned long of the (positive) value to subtract
would occur as part of evaluating function arguments, and the
resulting subtraction would then be correct.
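
A minimal sketch of such a helper, following the existing mfn_add()
pattern (illustrative only; mfn_sub() does not exist at this point and
its eventual form may differ):

    /* Existing helper, shown for comparison. */
    static inline mfn_t mfn_add(mfn_t mfn, unsigned long i)
    {
        return _mfn(mfn_x(mfn) + i);
    }

    /* Hypothetical mfn_sub(): the unsigned long parameter causes the
     * (positive) amount to subtract to be converted to unsigned long
     * as part of argument passing, so the subtraction happens at full
     * width and the -1U vs. -1UL pitfall above cannot occur. */
    static inline mfn_t mfn_sub(mfn_t mfn, unsigned long i)
    {
        return _mfn(mfn_x(mfn) - i);
    }

With that, the hunk above could use
mfn_to_maddr(mfn_sub(mfn, PFN_DOWN(xen_phys_start))) instead of
negating the PFN_DOWN() result.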

>>> @@ -584,21 +584,21 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
>>>           needed = 0;
>>>       }
>>>       else if ( *use_tail && nr >= needed &&
>>> -              arch_mfn_in_directmap(mfn + nr) &&
>>> +              arch_mfn_in_directmap(mfn_x(mfn_add(mfn, nr))) &&
>>>                 (!xenheap_bits ||
>>> -               !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
>>> +               !((mfn_x(mfn) + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
>>
>> May I suggest consistency here: This one uses +, while ...
>>
>>>       {
>>> -        _heap[node] = mfn_to_virt(mfn + nr - needed);
>>> -        avail[node] = mfn_to_virt(mfn + nr - 1) +
>>> +        _heap[node] = mfn_to_virt(mfn_add(mfn, nr - needed));
>>> +        avail[node] = mfn_to_virt(mfn_add(mfn, nr - 1)) +
>>>                         PAGE_SIZE - sizeof(**avail) * NR_ZONES;
>>>       }
>>>       else if ( nr >= needed &&
>>> -              arch_mfn_in_directmap(mfn + needed) &&
>>> +              arch_mfn_in_directmap(mfn_x(mfn_add(mfn, needed))) &&
>>
>> ... this one uses mfn_add() despite the mfn_x() around it, and ...
> 
> So the reason I used mfn_x(mfn_add(mfn, needed)) here is that I plan
> to convert arch_mfn_in_directmap() to use typesafe MFN soon. In the
> two other cases...
> 
>>>                 (!xenheap_bits ||
>>> -               !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
>>> +               !((mfn_x(mfn) + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
>>
>> ... here you use + again. My personal preference would be to avoid
>> constructs like mfn_x(mfn_add()).
> 
> ... I am still unsure how to avoid mfn_x(). Do you have any ideas?

I don't see how it can be avoided right now. But I also don't see
why - for consistency, as said - you couldn't use mfn_x() also in
the middle case. You could then still convert to mfn_add() with
that future change of yours.

>>> --- a/xen/include/asm-x86/mm.h
>>> +++ b/xen/include/asm-x86/mm.h
>>> @@ -667,7 +667,7 @@ static inline bool arch_mfn_in_directmap(unsigned long mfn)
>>>   {
>>>       unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
>>>   -    return mfn <= (virt_to_mfn(eva - 1) + 1);
>>> +    return mfn <= mfn_x(mfn_add(virt_to_mfn(eva - 1),  1));
>>
>> Even if you wanted to stick to using mfn_add() here, there's one
>> blank too many after the comma.
> 
> I will remove the extra blank. Regarding the construction, I have
> been wondering for a couple of years now whether we should
> introduce mfn_{lt, gt}. What do you think?

I too have been wondering, and wouldn't mind their introduction
(plus mfn_le / mfn_ge perhaps). But it'll truly help you here
anyway only once the function parameter is also mfn_t.
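
Such helpers would presumably follow the same shape as the existing
mfn_eq() (a sketch under that assumption, not actual Xen code):

    static inline bool mfn_lt(mfn_t x, mfn_t y) { return mfn_x(x) < mfn_x(y); }
    static inline bool mfn_gt(mfn_t x, mfn_t y) { return mfn_x(x) > mfn_x(y); }
    static inline bool mfn_le(mfn_t x, mfn_t y) { return mfn_x(x) <= mfn_x(y); }
    static inline bool mfn_ge(mfn_t x, mfn_t y) { return mfn_x(x) >= mfn_x(y); }

Once arch_mfn_in_directmap() takes an mfn_t, its return statement could
then become, e.g., mfn_le(mfn, mfn_add(virt_to_mfn(eva - 1), 1)).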

Jan
Julien Grall March 28, 2020, 10:33 a.m. UTC | #4
Hi,

On 26/03/2020 09:09, Jan Beulich wrote:
> On 25.03.2020 19:21, Julien Grall wrote:
>> On 25/03/2020 15:27, Jan Beulich wrote:
>>> On 22.03.2020 17:14, julien@xen.org wrote:
>>>> @@ -785,21 +781,21 @@ bool is_iomem_page(mfn_t mfn)
>>>>        return (page_get_owner(page) == dom_io);
>>>>    }
>>>>    -static int update_xen_mappings(unsigned long mfn, unsigned int cacheattr)
>>>> +static int update_xen_mappings(mfn_t mfn, unsigned int cacheattr)
>>>>    {
>>>>        int err = 0;
>>>> -    bool alias = mfn >= PFN_DOWN(xen_phys_start) &&
>>>> -         mfn < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START);
>>>> +    bool alias = mfn_x(mfn) >= PFN_DOWN(xen_phys_start) &&
>>>> +         mfn_x(mfn) < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START);
>>>>        unsigned long xen_va =
>>>> -        XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
>>>> +        XEN_VIRT_START + mfn_to_maddr(mfn_add(mfn, -PFN_DOWN(xen_phys_start)));
>>>
>>> Depending on the types involved (e.g. in PFN_DOWN()) this may
>>> or may not be safe, so I consider such a transformation at
>>> least fragile. I think we either want to gain mfn_sub() or
>>> keep this as a "real" subtraction.
>> I want to avoid mfn_x() as much as possible when everything can
>> be done using typesafe operations. But I am not sure how
>> mfn_sub() would solve the problem. Do you mind providing more
>> information?
> 
> Consider PFN_DOWN() potentially returning "unsigned int". The
> negation of an unsigned int is still an unsigned int, and hence
> e.g. -1U (which might result here) is really 0xFFFFFFFF rather
> than -1L / -1UL as intended. Whereas with mfn_sub() the
> conversion to unsigned long of the (positive) value to subtract
> would occur as part of evaluating function arguments, and the
> resulting subtraction would then be correct.

I will have a look to introduce mfn_sub().

> 
>>>> @@ -584,21 +584,21 @@ static unsigned long init_node_heap(int node, unsigned long mfn,
>>>>            needed = 0;
>>>>        }
>>>>        else if ( *use_tail && nr >= needed &&
>>>> -              arch_mfn_in_directmap(mfn + nr) &&
>>>> +              arch_mfn_in_directmap(mfn_x(mfn_add(mfn, nr))) &&
>>>>                  (!xenheap_bits ||
>>>> -               !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
>>>> +               !((mfn_x(mfn) + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
>>>
>>> May I suggest consistency here: This one uses +, while ...
>>>
>>>>        {
>>>> -        _heap[node] = mfn_to_virt(mfn + nr - needed);
>>>> -        avail[node] = mfn_to_virt(mfn + nr - 1) +
>>>> +        _heap[node] = mfn_to_virt(mfn_add(mfn, nr - needed));
>>>> +        avail[node] = mfn_to_virt(mfn_add(mfn, nr - 1)) +
>>>>                          PAGE_SIZE - sizeof(**avail) * NR_ZONES;
>>>>        }
>>>>        else if ( nr >= needed &&
>>>> -              arch_mfn_in_directmap(mfn + needed) &&
>>>> +              arch_mfn_in_directmap(mfn_x(mfn_add(mfn, needed))) &&
>>>
>>> ... this one uses mfn_add() despite the mfn_x() around it, and ...
>>
>> So the reason I used mfn_x(mfn_add(mfn, needed)) here is that I plan
>> to convert arch_mfn_in_directmap() to use typesafe MFN soon. In the
>> two other cases...
>>
>>>>                  (!xenheap_bits ||
>>>> -               !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
>>>> +               !((mfn_x(mfn) + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
>>>
>>> ... here you use + again. My personal preference would be to avoid
>>> constructs like mfn_x(mfn_add()).
>>
>> ... I am still unsure how to avoid mfn_x(). Do you have any ideas?
> 
> I don't see how it can be avoided right now. But I also don't see
> why - for consistency, as said - you couldn't use mfn_x() also in
> the middle case. You could then still convert to mfn_add() with
> that future change of yours.

I could have, just as I could also have converted
arch_mfn_in_directmap() to use typesafe MFN. Anything around typesafe
is a can of worms, and this is the fine line I found.

Anyway, I cannot be bothered to bikeshed... so I am going to switch the
other one to mfn_x(...) + needed.

> 
>>>> --- a/xen/include/asm-x86/mm.h
>>>> +++ b/xen/include/asm-x86/mm.h
>>>> @@ -667,7 +667,7 @@ static inline bool arch_mfn_in_directmap(unsigned long mfn)
>>>>    {
>>>>        unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
>>>>    -    return mfn <= (virt_to_mfn(eva - 1) + 1);
>>>> +    return mfn <= mfn_x(mfn_add(virt_to_mfn(eva - 1),  1));
>>>
>>> Even if you wanted to stick to using mfn_add() here, there's one
>>> blank too many after the comma.
>>
>> I will remove the extra blank. Regarding the construction, I have
>> been wondering for a couple of years now whether we should
>> introduce mfn_{lt, gt}. What do you think?
> 
> I too have been wondering, and wouldn't mind their introduction
> (plus mfn_le / mfn_ge perhaps). But it'll truly help you here
> anyway only once the function parameter is also mfn_t.

This is a longer-term plan, so I am going to leave it like that for now
until I manage to find time.

Cheers,

Patch

diff --git a/xen/arch/arm/acpi/domain_build.c b/xen/arch/arm/acpi/domain_build.c
index 1b1cfabb00..b3ac32f601 100644
--- a/xen/arch/arm/acpi/domain_build.c
+++ b/xen/arch/arm/acpi/domain_build.c
@@ -20,10 +20,6 @@ 
 #include <asm/kernel.h>
 #include <asm/domain_build.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
 #define ACPI_DOM0_FDT_MIN_SIZE 4096
 
 static int __init acpi_iomem_deny_access(struct domain *d)
diff --git a/xen/arch/arm/alternative.c b/xen/arch/arm/alternative.c
index 237c4e5642..724b0b187e 100644
--- a/xen/arch/arm/alternative.c
+++ b/xen/arch/arm/alternative.c
@@ -32,10 +32,6 @@ 
 #include <asm/insn.h>
 #include <asm/page.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
 extern const struct alt_instr __alt_instructions[], __alt_instructions_end[];
 
 struct alt_region {
diff --git a/xen/arch/arm/cpuerrata.c b/xen/arch/arm/cpuerrata.c
index 0248893de0..68105fe91f 100644
--- a/xen/arch/arm/cpuerrata.c
+++ b/xen/arch/arm/cpuerrata.c
@@ -14,10 +14,6 @@ 
 #include <asm/insn.h>
 #include <asm/psci.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
 /* Hardening Branch predictor code for Arm64 */
 #ifdef CONFIG_ARM64_HARDEN_BRANCH_PREDICTOR
 
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index 4307087536..5c9a55f084 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -52,10 +52,6 @@  struct map_range_data
     p2m_type_t p2mt;
 };
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
 //#define DEBUG_11_ALLOCATION
 #ifdef DEBUG_11_ALLOCATION
 # define D11PRINT(fmt, args...) printk(XENLOG_DEBUG fmt, ##args)
diff --git a/xen/arch/arm/livepatch.c b/xen/arch/arm/livepatch.c
index 915e9d926a..0ffdda6005 100644
--- a/xen/arch/arm/livepatch.c
+++ b/xen/arch/arm/livepatch.c
@@ -12,10 +12,6 @@ 
 #include <asm/livepatch.h>
 #include <asm/mm.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
 void *vmap_of_xen_text;
 
 int arch_livepatch_safety_check(void)
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index 727107eefa..1075e5fcaf 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -43,12 +43,6 @@ 
 
 #include <asm/setup.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-#undef mfn_to_virt
-#define mfn_to_virt(mfn) __mfn_to_virt(mfn_x(mfn))
-
 #ifdef NDEBUG
 static inline void
 __attribute__ ((__format__ (__printf__, 1, 2)))
@@ -835,7 +829,7 @@  void __init setup_xenheap_mappings(unsigned long base_mfn,
      * Virtual address aligned to previous 1GB to match physical
      * address alignment done above.
      */
-    vaddr = (vaddr_t)__mfn_to_virt(base_mfn) & FIRST_MASK;
+    vaddr = (vaddr_t)mfn_to_virt(_mfn(base_mfn)) & FIRST_MASK;
 
     while ( mfn < end_mfn )
     {
diff --git a/xen/arch/x86/domain_page.c b/xen/arch/x86/domain_page.c
index dd32712d2f..8b8bf4cbe8 100644
--- a/xen/arch/x86/domain_page.c
+++ b/xen/arch/x86/domain_page.c
@@ -78,17 +78,17 @@  void *map_domain_page(mfn_t mfn)
 
 #ifdef NDEBUG
     if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
-        return mfn_to_virt(mfn_x(mfn));
+        return mfn_to_virt(mfn);
 #endif
 
     v = mapcache_current_vcpu();
     if ( !v || !is_pv_vcpu(v) )
-        return mfn_to_virt(mfn_x(mfn));
+        return mfn_to_virt(mfn);
 
     dcache = &v->domain->arch.pv.mapcache;
     vcache = &v->arch.pv.mapcache;
     if ( !dcache->inuse )
-        return mfn_to_virt(mfn_x(mfn));
+        return mfn_to_virt(mfn);
 
     perfc_incr(map_domain_page_count);
 
@@ -311,7 +311,7 @@  void *map_domain_page_global(mfn_t mfn)
 
 #ifdef NDEBUG
     if ( mfn_x(mfn) <= PFN_DOWN(__pa(HYPERVISOR_VIRT_END - 1)) )
-        return mfn_to_virt(mfn_x(mfn));
+        return mfn_to_virt(mfn);
 #endif
 
     return vmap(&mfn, 1);
@@ -336,7 +336,7 @@  mfn_t domain_page_map_to_mfn(const void *ptr)
     const l1_pgentry_t *pl1e;
 
     if ( va >= DIRECTMAP_VIRT_START )
-        return _mfn(virt_to_mfn(ptr));
+        return virt_to_mfn(ptr);
 
     if ( va >= VMAP_VIRT_START && va < VMAP_VIRT_END )
     {
diff --git a/xen/arch/x86/hvm/dom0_build.c b/xen/arch/x86/hvm/dom0_build.c
index 2afd44c8a4..143b7e0a3c 100644
--- a/xen/arch/x86/hvm/dom0_build.c
+++ b/xen/arch/x86/hvm/dom0_build.c
@@ -444,31 +444,32 @@  static int __init pvh_populate_p2m(struct domain *d)
     /* Populate memory map. */
     for ( i = 0; i < d->arch.nr_e820; i++ )
     {
-        unsigned long addr, size;
+        mfn_t mfn;
+        unsigned long size;
 
         if ( d->arch.e820[i].type != E820_RAM )
             continue;
 
-        addr = PFN_DOWN(d->arch.e820[i].addr);
+        mfn = maddr_to_mfn(d->arch.e820[i].addr);
         size = PFN_DOWN(d->arch.e820[i].size);
 
-        rc = pvh_populate_memory_range(d, addr, size);
+        rc = pvh_populate_memory_range(d, mfn_x(mfn), size);
         if ( rc )
             return rc;
 
-        if ( addr < MB1_PAGES )
+        if ( mfn_x(mfn) < MB1_PAGES )
         {
             uint64_t end = min_t(uint64_t, MB(1),
                                  d->arch.e820[i].addr + d->arch.e820[i].size);
             enum hvm_translation_result res =
-                 hvm_copy_to_guest_phys(mfn_to_maddr(_mfn(addr)),
-                                        mfn_to_virt(addr),
+                 hvm_copy_to_guest_phys(mfn_to_maddr(mfn),
+                                        mfn_to_virt(mfn),
                                         d->arch.e820[i].addr - end,
                                         v);
 
             if ( res != HVMTRANS_okay )
-                printk("Failed to copy [%#lx, %#lx): %d\n",
-                       addr, addr + size, res);
+                printk("Failed to copy [%"PRI_mfn", %"PRI_mfn"): %d\n",
+                       mfn_x(mfn), mfn_x(mfn_add(mfn, size)), res);
         }
     }
 
@@ -607,7 +608,8 @@  static int __init pvh_load_kernel(struct domain *d, const module_t *image,
 
     if ( initrd != NULL )
     {
-        rc = hvm_copy_to_guest_phys(last_addr, mfn_to_virt(initrd->mod_start),
+        rc = hvm_copy_to_guest_phys(last_addr,
+                                    mfn_to_virt(_mfn(initrd->mod_start)),
                                     initrd->mod_end, v);
         if ( rc )
         {
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 069a61deb8..7c0f81759a 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -152,10 +152,6 @@ 
 #include "pv/mm.h"
 #endif
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(v) _mfn(__virt_to_mfn(v))
-
 /* Mapping of the fixmap space needed early. */
 l1_pgentry_t __section(".bss.page_aligned") __aligned(PAGE_SIZE)
     l1_fixmap[L1_PAGETABLE_ENTRIES];
@@ -323,8 +319,8 @@  void __init arch_init_memory(void)
         iostart_pfn = max_t(unsigned long, pfn, 1UL << (20 - PAGE_SHIFT));
         ioend_pfn = min(rstart_pfn, 16UL << (20 - PAGE_SHIFT));
         if ( iostart_pfn < ioend_pfn )
-            destroy_xen_mappings((unsigned long)mfn_to_virt(iostart_pfn),
-                                 (unsigned long)mfn_to_virt(ioend_pfn));
+            destroy_xen_mappings((unsigned long)mfn_to_virt(_mfn(iostart_pfn)),
+                                 (unsigned long)mfn_to_virt(_mfn(ioend_pfn)));
 
         /* Mark as I/O up to next RAM region. */
         for ( ; pfn < rstart_pfn; pfn++ )
@@ -785,21 +781,21 @@  bool is_iomem_page(mfn_t mfn)
     return (page_get_owner(page) == dom_io);
 }
 
-static int update_xen_mappings(unsigned long mfn, unsigned int cacheattr)
+static int update_xen_mappings(mfn_t mfn, unsigned int cacheattr)
 {
     int err = 0;
-    bool alias = mfn >= PFN_DOWN(xen_phys_start) &&
-         mfn < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START);
+    bool alias = mfn_x(mfn) >= PFN_DOWN(xen_phys_start) &&
+         mfn_x(mfn) < PFN_UP(xen_phys_start + xen_virt_end - XEN_VIRT_START);
     unsigned long xen_va =
-        XEN_VIRT_START + ((mfn - PFN_DOWN(xen_phys_start)) << PAGE_SHIFT);
+        XEN_VIRT_START + mfn_to_maddr(mfn_add(mfn, -PFN_DOWN(xen_phys_start)));
 
     if ( unlikely(alias) && cacheattr )
-        err = map_pages_to_xen(xen_va, _mfn(mfn), 1, 0);
+        err = map_pages_to_xen(xen_va, mfn, 1, 0);
     if ( !err )
-        err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), _mfn(mfn), 1,
+        err = map_pages_to_xen((unsigned long)mfn_to_virt(mfn), mfn, 1,
                      PAGE_HYPERVISOR | cacheattr_to_pte_flags(cacheattr));
     if ( unlikely(alias) && !cacheattr && !err )
-        err = map_pages_to_xen(xen_va, _mfn(mfn), 1, PAGE_HYPERVISOR);
+        err = map_pages_to_xen(xen_va, mfn, 1, PAGE_HYPERVISOR);
     return err;
 }
 
@@ -1029,7 +1025,7 @@  get_page_from_l1e(
             nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
         } while ( (y = cmpxchg(&page->count_info, x, nx)) != x );
 
-        err = update_xen_mappings(mfn, cacheattr);
+        err = update_xen_mappings(_mfn(mfn), cacheattr);
         if ( unlikely(err) )
         {
             cacheattr = y & PGC_cacheattr_mask;
@@ -2449,7 +2445,7 @@  static int cleanup_page_mappings(struct page_info *page)
 
         BUG_ON(is_xen_heap_page(page));
 
-        rc = update_xen_mappings(mfn, 0);
+        rc = update_xen_mappings(_mfn(mfn), 0);
     }
 
     /*
@@ -4950,7 +4946,7 @@  void *alloc_xen_pagetable(void)
 {
     mfn_t mfn = alloc_xen_pagetable_new();
 
-    return mfn_eq(mfn, INVALID_MFN) ? NULL : mfn_to_virt(mfn_x(mfn));
+    return mfn_eq(mfn, INVALID_MFN) ? NULL : mfn_to_virt(mfn);
 }
 
 void free_xen_pagetable(void *v)
@@ -4983,7 +4979,7 @@  mfn_t alloc_xen_pagetable_new(void)
 void free_xen_pagetable_new(mfn_t mfn)
 {
     if ( system_state != SYS_STATE_early_boot && !mfn_eq(mfn, INVALID_MFN) )
-        free_xenheap_page(mfn_to_virt(mfn_x(mfn)));
+        free_xenheap_page(mfn_to_virt(mfn));
 }
 
 static DEFINE_SPINLOCK(map_pgdir_lock);
diff --git a/xen/arch/x86/numa.c b/xen/arch/x86/numa.c
index f1066c59c7..87f7365304 100644
--- a/xen/arch/x86/numa.c
+++ b/xen/arch/x86/numa.c
@@ -100,14 +100,12 @@  static int __init populate_memnodemap(const struct node *nodes,
 static int __init allocate_cachealigned_memnodemap(void)
 {
     unsigned long size = PFN_UP(memnodemapsize * sizeof(*memnodemap));
-    unsigned long mfn = mfn_x(alloc_boot_pages(size, 1));
+    mfn_t mfn = alloc_boot_pages(size, 1);
 
     memnodemap = mfn_to_virt(mfn);
-    mfn <<= PAGE_SHIFT;
-    size <<= PAGE_SHIFT;
     printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
-           mfn, mfn + size);
-    memnodemapsize = size / sizeof(*memnodemap);
+           mfn_to_maddr(mfn), mfn_to_maddr(mfn_add(mfn, size)));
+    memnodemapsize = (size << PAGE_SHIFT) / sizeof(*memnodemap);
 
     return 0;
 }
diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c
index 940804b18a..f22beb1f3c 100644
--- a/xen/arch/x86/pv/descriptor-tables.c
+++ b/xen/arch/x86/pv/descriptor-tables.c
@@ -76,7 +76,7 @@  bool pv_destroy_ldt(struct vcpu *v)
 void pv_destroy_gdt(struct vcpu *v)
 {
     l1_pgentry_t *pl1e = pv_gdt_ptes(v);
-    mfn_t zero_mfn = _mfn(virt_to_mfn(zero_page));
+    mfn_t zero_mfn = virt_to_mfn(zero_page);
     l1_pgentry_t zero_l1e = l1e_from_mfn(zero_mfn, __PAGE_HYPERVISOR_RO);
     unsigned int i;
 
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index 5678da782d..30846b5f97 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -523,7 +523,7 @@  int __init dom0_construct_pv(struct domain *d,
                     free_domheap_pages(page, order);
                     page += 1UL << order;
                 }
-            memcpy(page_to_virt(page), mfn_to_virt(initrd->mod_start),
+            memcpy(page_to_virt(page), mfn_to_virt(_mfn(initrd->mod_start)),
                    initrd_len);
             mpt_alloc = (paddr_t)initrd->mod_start << PAGE_SHIFT;
             init_domheap_pages(mpt_alloc,
@@ -601,7 +601,7 @@  int __init dom0_construct_pv(struct domain *d,
         maddr_to_page(mpt_alloc)->u.inuse.type_info = PGT_l4_page_table;
         l4start = l4tab = __va(mpt_alloc); mpt_alloc += PAGE_SIZE;
         clear_page(l4tab);
-        init_xen_l4_slots(l4tab, _mfn(virt_to_mfn(l4start)),
+        init_xen_l4_slots(l4tab, virt_to_mfn(l4start),
                           d, INVALID_MFN, true);
         v->arch.guest_table = pagetable_from_paddr(__pa(l4start));
     }
diff --git a/xen/arch/x86/pv/shim.c b/xen/arch/x86/pv/shim.c
index ed2ece8a8a..b849c60699 100644
--- a/xen/arch/x86/pv/shim.c
+++ b/xen/arch/x86/pv/shim.c
@@ -39,9 +39,6 @@ 
 
 #include <compat/grant_table.h>
 
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
 #ifdef CONFIG_PV_SHIM_EXCLUSIVE
 /* Tolerate "pv-shim" being passed to a CONFIG_PV_SHIM_EXCLUSIVE hypervisor. */
 ignore_param("pv-shim");
diff --git a/xen/arch/x86/setup.c b/xen/arch/x86/setup.c
index 885919d5c3..cfe95c5dac 100644
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -340,7 +340,7 @@  void *__init bootstrap_map(const module_t *mod)
     void *ret;
 
     if ( system_state != SYS_STATE_early_boot )
-        return mod ? mfn_to_virt(mod->mod_start) : NULL;
+        return mod ? mfn_to_virt(_mfn(mod->mod_start)) : NULL;
 
     if ( !mod )
     {
@@ -1005,7 +1005,7 @@  void __init noreturn __start_xen(unsigned long mbi_p)
          * This needs to remain in sync with xen_in_range() and the
          * respective reserve_e820_ram() invocation below.
          */
-        mod[mbi->mods_count].mod_start = virt_to_mfn(_stext);
+        mod[mbi->mods_count].mod_start = mfn_x(virt_to_mfn(_stext));
         mod[mbi->mods_count].mod_end = __2M_rwdata_end - _stext;
     }
 
@@ -1404,7 +1404,7 @@  void __init noreturn __start_xen(unsigned long mbi_p)
     {
         set_pdx_range(mod[i].mod_start,
                       mod[i].mod_start + PFN_UP(mod[i].mod_end));
-        map_pages_to_xen((unsigned long)mfn_to_virt(mod[i].mod_start),
+        map_pages_to_xen((unsigned long)mfn_to_virt(_mfn(mod[i].mod_start)),
                          _mfn(mod[i].mod_start),
                          PFN_UP(mod[i].mod_end), PAGE_HYPERVISOR);
     }
@@ -1494,9 +1494,9 @@  void __init noreturn __start_xen(unsigned long mbi_p)
 
     numa_initmem_init(0, raw_max_page);
 
-    if ( max_page - 1 > virt_to_mfn(HYPERVISOR_VIRT_END - 1) )
+    if ( max_page - 1 > mfn_x(virt_to_mfn(HYPERVISOR_VIRT_END - 1)) )
     {
-        unsigned long limit = virt_to_mfn(HYPERVISOR_VIRT_END - 1);
+        unsigned long limit = mfn_x(virt_to_mfn(HYPERVISOR_VIRT_END - 1));
         uint64_t mask = PAGE_SIZE - 1;
 
         if ( !highmem_start )
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index 09264b02d1..31b4366ab2 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -996,7 +996,7 @@  static int cpu_smpboot_alloc(unsigned int cpu)
         goto out;
     per_cpu(gdt, cpu) = gdt;
     per_cpu(gdt_l1e, cpu) =
-        l1e_from_pfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
+        l1e_from_mfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
     memcpy(gdt, boot_gdt, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
     BUILD_BUG_ON(NR_CPUS > 0x10000);
     gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
@@ -1005,7 +1005,7 @@  static int cpu_smpboot_alloc(unsigned int cpu)
     if ( gdt == NULL )
         goto out;
     per_cpu(compat_gdt_l1e, cpu) =
-        l1e_from_pfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
+        l1e_from_mfn(virt_to_mfn(gdt), __PAGE_HYPERVISOR_RW);
     memcpy(gdt, boot_compat_gdt, NR_RESERVED_GDT_PAGES * PAGE_SIZE);
     gdt[PER_CPU_GDT_ENTRY - FIRST_RESERVED_GDT_ENTRY].a = cpu;
 
diff --git a/xen/arch/x86/srat.c b/xen/arch/x86/srat.c
index 506a56d66b..0baf8b97ce 100644
--- a/xen/arch/x86/srat.c
+++ b/xen/arch/x86/srat.c
@@ -196,7 +196,7 @@  void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
 		return;
 	}
 	mfn = alloc_boot_pages(PFN_UP(slit->header.length), 1);
-	acpi_slit = mfn_to_virt(mfn_x(mfn));
+	acpi_slit = mfn_to_virt(mfn);
 	memcpy(acpi_slit, slit, slit->header.length);
 }
 
diff --git a/xen/arch/x86/tboot.c b/xen/arch/x86/tboot.c
index 8c232270b4..19ea69f7c1 100644
--- a/xen/arch/x86/tboot.c
+++ b/xen/arch/x86/tboot.c
@@ -260,7 +260,7 @@  static int mfn_in_guarded_stack(unsigned long mfn)
             continue;
         p = (void *)((unsigned long)stack_base[i] + STACK_SIZE -
                      PRIMARY_STACK_SIZE - PAGE_SIZE);
-        if ( mfn == virt_to_mfn(p) )
+        if ( mfn_eq(_mfn(mfn), virt_to_mfn(p)) )
             return -1;
     }
 
@@ -296,7 +296,7 @@  static void tboot_gen_xenheap_integrity(const uint8_t key[TB_KEY_SIZE],
             if ( mfn_in_guarded_stack(mfn) )
                 continue; /* skip guard stack, see memguard_guard_stack() in mm.c */
 
-            pg = mfn_to_virt(mfn);
+            pg = mfn_to_virt(_mfn(mfn));
             vmac_update((uint8_t *)pg, PAGE_SIZE, &ctx);
         }
     }
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index e838846c6b..4aa7c35be4 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2029,9 +2029,9 @@  void __init trap_init(void)
 
     /* Cache {,compat_}gdt_l1e now that physically relocation is done. */
     this_cpu(gdt_l1e) =
-        l1e_from_pfn(virt_to_mfn(boot_gdt), __PAGE_HYPERVISOR_RW);
+        l1e_from_mfn(virt_to_mfn(boot_gdt), __PAGE_HYPERVISOR_RW);
     this_cpu(compat_gdt_l1e) =
-        l1e_from_pfn(virt_to_mfn(boot_compat_gdt), __PAGE_HYPERVISOR_RW);
+        l1e_from_mfn(virt_to_mfn(boot_compat_gdt), __PAGE_HYPERVISOR_RW);
 
     percpu_traps_init();
 
diff --git a/xen/arch/x86/x86_64/mm.c b/xen/arch/x86/x86_64/mm.c
index 3516423bb0..ddd5f1ddc4 100644
--- a/xen/arch/x86/x86_64/mm.c
+++ b/xen/arch/x86/x86_64/mm.c
@@ -1369,11 +1369,12 @@  int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
         return -EINVAL;
     }
 
-    i = virt_to_mfn(HYPERVISOR_VIRT_END - 1) + 1;
+    i = mfn_x(virt_to_mfn(HYPERVISOR_VIRT_END - 1)) + 1;
     if ( spfn < i )
     {
-        ret = map_pages_to_xen((unsigned long)mfn_to_virt(spfn), _mfn(spfn),
-                               min(epfn, i) - spfn, PAGE_HYPERVISOR);
+        ret = map_pages_to_xen((unsigned long)mfn_to_virt(_mfn(spfn)),
+                               _mfn(spfn), min(epfn, i) - spfn,
+                               PAGE_HYPERVISOR);
         if ( ret )
             goto destroy_directmap;
     }
@@ -1381,7 +1382,7 @@  int memory_add(unsigned long spfn, unsigned long epfn, unsigned int pxm)
     {
         if ( i < spfn )
             i = spfn;
-        ret = map_pages_to_xen((unsigned long)mfn_to_virt(i), _mfn(i),
+        ret = map_pages_to_xen((unsigned long)mfn_to_virt(_mfn(i)), _mfn(i),
                                epfn - i, __PAGE_HYPERVISOR_RW);
         if ( ret )
             goto destroy_directmap;
@@ -1473,8 +1474,8 @@  destroy_frametable:
     NODE_DATA(node)->node_start_pfn = old_node_start;
     NODE_DATA(node)->node_spanned_pages = old_node_span;
  destroy_directmap:
-    destroy_xen_mappings((unsigned long)mfn_to_virt(spfn),
-                         (unsigned long)mfn_to_virt(epfn));
+    destroy_xen_mappings((unsigned long)mfn_to_virt(_mfn(spfn)),
+                         (unsigned long)mfn_to_virt(_mfn(epfn)));
 
     return ret;
 }
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index a69b3b59a8..e4a055dc67 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -196,7 +196,8 @@  void getdomaininfo(struct domain *d, struct xen_domctl_getdomaininfo *info)
     info->outstanding_pages = d->outstanding_pages;
     info->shr_pages         = atomic_read(&d->shr_pages);
     info->paged_pages       = atomic_read(&d->paged_pages);
-    info->shared_info_frame = mfn_to_gmfn(d, virt_to_mfn(d->shared_info));
+    info->shared_info_frame = mfn_to_gmfn(d,
+                                          mfn_x(virt_to_mfn(d->shared_info)));
     BUG_ON(SHARED_M2P(info->shared_info_frame));
 
     info->cpupool = cpupool_get_id(d);
diff --git a/xen/common/efi/boot.c b/xen/common/efi/boot.c
index a6f84c945a..4f944fb3e8 100644
--- a/xen/common/efi/boot.c
+++ b/xen/common/efi/boot.c
@@ -1447,7 +1447,7 @@  static __init void copy_mapping(unsigned long mfn, unsigned long end,
     {
         l4_pgentry_t l4e = efi_l4_pgtable[l4_table_offset(mfn << PAGE_SHIFT)];
         l3_pgentry_t *l3src, *l3dst;
-        unsigned long va = (unsigned long)mfn_to_virt(mfn);
+        unsigned long va = (unsigned long)mfn_to_virt(_mfn(mfn));
 
         next = mfn + (1UL << (L3_PAGETABLE_SHIFT - PAGE_SHIFT));
         if ( !is_valid(mfn, min(next, end)) )
@@ -1562,9 +1562,10 @@  void __init efi_init_memory(void)
              !(smfn & pfn_hole_mask) &&
              !((smfn ^ (emfn - 1)) & ~pfn_pdx_bottom_mask) )
         {
-            if ( (unsigned long)mfn_to_virt(emfn - 1) >= HYPERVISOR_VIRT_END )
+            if ( (unsigned long)mfn_to_virt(_mfn(emfn - 1)) >=
+                 HYPERVISOR_VIRT_END )
                 prot &= ~_PAGE_GLOBAL;
-            if ( map_pages_to_xen((unsigned long)mfn_to_virt(smfn),
+            if ( map_pages_to_xen((unsigned long)mfn_to_virt(_mfn(smfn)),
                                   _mfn(smfn), emfn - smfn, prot) == 0 )
                 desc->VirtualStart =
                     (unsigned long)maddr_to_virt(desc->PhysicalStart);
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 9fd6e60416..407fdf08ff 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -3935,8 +3935,8 @@  static int gnttab_get_status_frame_mfn(struct domain *d,
     }
 
     /* Make sure idx is bounded wrt nr_status_frames */
-    *mfn = _mfn(virt_to_mfn(
-                gt->status[array_index_nospec(idx, nr_status_frames(gt))]));
+    *mfn = virt_to_mfn(
+                gt->status[array_index_nospec(idx, nr_status_frames(gt))]);
     return 0;
 }
 
@@ -3966,8 +3966,8 @@  static int gnttab_get_shared_frame_mfn(struct domain *d,
     }
 
     /* Make sure idx is bounded wrt nr_status_frames */
-    *mfn = _mfn(virt_to_mfn(
-                gt->shared_raw[array_index_nospec(idx, nr_grant_frames(gt))]));
+    *mfn = virt_to_mfn(
+                gt->shared_raw[array_index_nospec(idx, nr_grant_frames(gt))]);
     return 0;
 }
 
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 76d37226df..41e4fa899d 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -565,7 +565,7 @@  static unsigned int __read_mostly xenheap_bits;
 #define xenheap_bits 0
 #endif
 
-static unsigned long init_node_heap(int node, unsigned long mfn,
+static unsigned long init_node_heap(int node, mfn_t mfn,
                                     unsigned long nr, bool *use_tail)
 {
     /* First node to be discovered has its heap metadata statically alloced. */
@@ -584,21 +584,21 @@  static unsigned long init_node_heap(int node, unsigned long mfn,
         needed = 0;
     }
     else if ( *use_tail && nr >= needed &&
-              arch_mfn_in_directmap(mfn + nr) &&
+              arch_mfn_in_directmap(mfn_x(mfn_add(mfn, nr))) &&
               (!xenheap_bits ||
-               !((mfn + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
+               !((mfn_x(mfn) + nr - 1) >> (xenheap_bits - PAGE_SHIFT))) )
     {
-        _heap[node] = mfn_to_virt(mfn + nr - needed);
-        avail[node] = mfn_to_virt(mfn + nr - 1) +
+        _heap[node] = mfn_to_virt(mfn_add(mfn, nr - needed));
+        avail[node] = mfn_to_virt(mfn_add(mfn, nr - 1)) +
                       PAGE_SIZE - sizeof(**avail) * NR_ZONES;
     }
     else if ( nr >= needed &&
-              arch_mfn_in_directmap(mfn + needed) &&
+              arch_mfn_in_directmap(mfn_x(mfn_add(mfn, needed))) &&
               (!xenheap_bits ||
-               !((mfn + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
+               !((mfn_x(mfn) + needed - 1) >> (xenheap_bits - PAGE_SHIFT))) )
     {
         _heap[node] = mfn_to_virt(mfn);
-        avail[node] = mfn_to_virt(mfn + needed - 1) +
+        avail[node] = mfn_to_virt(mfn_add(mfn, needed - 1)) +
                       PAGE_SIZE - sizeof(**avail) * NR_ZONES;
         *use_tail = false;
     }
@@ -1809,7 +1809,7 @@  static void init_heap_pages(
                             (find_first_set_bit(e) <= find_first_set_bit(s));
             unsigned long n;
 
-            n = init_node_heap(nid, mfn_x(page_to_mfn(pg + i)), nr_pages - i,
+            n = init_node_heap(nid, page_to_mfn(pg + i), nr_pages - i,
                                &use_tail);
             BUG_ON(i + n > nr_pages);
             if ( n && !use_tail )
diff --git a/xen/common/trace.c b/xen/common/trace.c
index a2a389a1c7..8dbbcd31de 100644
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -218,7 +218,7 @@  static int alloc_trace_bufs(unsigned int pages)
                 t_info_mfn_list[offset + i] = 0;
                 goto out_dealloc;
             }
-            t_info_mfn_list[offset + i] = virt_to_mfn(p);
+            t_info_mfn_list[offset + i] = mfn_x(virt_to_mfn(p));
         }
     }
 
@@ -234,7 +234,8 @@  static int alloc_trace_bufs(unsigned int pages)
         offset = t_info->mfn_offset[cpu];
 
         /* Initialize the buffer metadata */
-        per_cpu(t_bufs, cpu) = buf = mfn_to_virt(t_info_mfn_list[offset]);
+        buf = mfn_to_virt(_mfn(t_info_mfn_list[offset]));
+        per_cpu(t_bufs, cpu) = buf;
         buf->cons = buf->prod = 0;
 
         printk(XENLOG_INFO "xentrace: p%d mfn %x offset %u\n",
@@ -269,10 +270,10 @@  out_dealloc:
             continue;
         for ( i = 0; i < pages; i++ )
         {
-            uint32_t mfn = t_info_mfn_list[offset + i];
-            if ( !mfn )
+            mfn_t mfn = _mfn(t_info_mfn_list[offset + i]);
+            if ( mfn_eq(mfn, _mfn(0)) )
                 break;
-            ASSERT(!(mfn_to_page(_mfn(mfn))->count_info & PGC_allocated));
+            ASSERT(!(mfn_to_page(mfn)->count_info & PGC_allocated));
             free_xenheap_pages(mfn_to_virt(mfn), 0);
         }
     }
@@ -378,7 +379,7 @@  int tb_control(struct xen_sysctl_tbuf_op *tbc)
     {
     case XEN_SYSCTL_TBUFOP_get_info:
         tbc->evt_mask   = tb_event_mask;
-        tbc->buffer_mfn = t_info ? virt_to_mfn(t_info) : 0;
+        tbc->buffer_mfn = t_info ? mfn_x(virt_to_mfn(t_info)) : 0;
         tbc->size = t_info_pages * PAGE_SIZE;
         break;
     case XEN_SYSCTL_TBUFOP_set_cpu_mask:
@@ -512,7 +513,7 @@  static unsigned char *next_record(const struct t_buf *buf, uint32_t *next,
     uint16_t per_cpu_mfn_offset;
     uint32_t per_cpu_mfn_nr;
     uint32_t *mfn_list;
-    uint32_t mfn;
+    mfn_t mfn;
     unsigned char *this_page;
 
     barrier(); /* must read buf->prod and buf->cons only once */
@@ -533,7 +534,7 @@  static unsigned char *next_record(const struct t_buf *buf, uint32_t *next,
     per_cpu_mfn_nr = x >> PAGE_SHIFT;
     per_cpu_mfn_offset = t_info->mfn_offset[smp_processor_id()];
     mfn_list = (uint32_t *)t_info;
-    mfn = mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr];
+    mfn = _mfn(mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr]);
     this_page = mfn_to_virt(mfn);
     if (per_cpu_mfn_nr + 1 >= opt_tbuf_size)
     {
@@ -542,7 +543,7 @@  static unsigned char *next_record(const struct t_buf *buf, uint32_t *next,
     }
     else
     {
-        mfn = mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr + 1];
+        mfn = _mfn(mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr + 1]);
         *next_page = mfn_to_virt(mfn);
     }
     return this_page;
diff --git a/xen/common/xenoprof.c b/xen/common/xenoprof.c
index 4f3e799ebb..2721e99da7 100644
--- a/xen/common/xenoprof.c
+++ b/xen/common/xenoprof.c
@@ -19,10 +19,6 @@ 
 #include <xsm/xsm.h>
 #include <xen/hypercall.h>
 
-/* Override macros from asm/page.h to make them work with mfn_t */
-#undef virt_to_mfn
-#define virt_to_mfn(va) _mfn(__virt_to_mfn(va))
-
 /* Limit amount of pages used for shared buffer (per domain) */
 #define MAX_OPROF_SHARED_PAGES 32
 
diff --git a/xen/drivers/acpi/osl.c b/xen/drivers/acpi/osl.c
index 4c8bb7839e..ca38565507 100644
--- a/xen/drivers/acpi/osl.c
+++ b/xen/drivers/acpi/osl.c
@@ -219,7 +219,7 @@  void *__init acpi_os_alloc_memory(size_t sz)
 	void *ptr;
 
 	if (system_state == SYS_STATE_early_boot)
-		return mfn_to_virt(mfn_x(alloc_boot_pages(PFN_UP(sz), 1)));
+		return mfn_to_virt(alloc_boot_pages(PFN_UP(sz), 1));
 
 	ptr = xmalloc_bytes(sz);
 	ASSERT(!ptr || is_xmalloc_memory(ptr));
diff --git a/xen/include/asm-arm/mm.h b/xen/include/asm-arm/mm.h
index 7df91280bc..abf4cc23e4 100644
--- a/xen/include/asm-arm/mm.h
+++ b/xen/include/asm-arm/mm.h
@@ -285,16 +285,8 @@  static inline uint64_t gvirt_to_maddr(vaddr_t va, paddr_t *pa,
 #define __va(x)             (maddr_to_virt(x))
 
 /* Convert between Xen-heap virtual addresses and machine frame numbers. */
-#define __virt_to_mfn(va) (virt_to_maddr(va) >> PAGE_SHIFT)
-#define __mfn_to_virt(mfn) (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
-
-/*
- * We define non-underscored wrappers for above conversion functions.
- * These are overriden in various source files while underscored version
- * remain intact.
- */
-#define virt_to_mfn(va)     __virt_to_mfn(va)
-#define mfn_to_virt(mfn)    __mfn_to_virt(mfn)
+#define virt_to_mfn(va)     maddr_to_mfn(virt_to_maddr(va))
+#define mfn_to_virt(mfn)    maddr_to_virt(mfn_to_maddr(mfn))
 
 /* Convert between Xen-heap virtual addresses and page-info structures. */
 static inline struct page_info *virt_to_page(const void *v)
@@ -312,7 +304,7 @@  static inline struct page_info *virt_to_page(const void *v)
 
 static inline void *page_to_virt(const struct page_info *pg)
 {
-    return mfn_to_virt(mfn_x(page_to_mfn(pg)));
+    return mfn_to_virt(page_to_mfn(pg));
 }
 
 struct page_info *get_page_from_gva(struct vcpu *v, vaddr_t va,
diff --git a/xen/include/asm-x86/grant_table.h b/xen/include/asm-x86/grant_table.h
index 84e32960c0..5871238f6d 100644
--- a/xen/include/asm-x86/grant_table.h
+++ b/xen/include/asm-x86/grant_table.h
@@ -45,11 +45,11 @@  static inline int replace_grant_host_mapping(uint64_t addr, mfn_t frame,
     VALID_M2P(gpfn_) ? _gfn(gpfn_) : INVALID_GFN;                        \
 })
 
-#define gnttab_shared_mfn(t, i) _mfn(__virt_to_mfn((t)->shared_raw[i]))
+#define gnttab_shared_mfn(t, i) virt_to_mfn((t)->shared_raw[i])
 
 #define gnttab_shared_gfn(d, t, i) mfn_to_gfn(d, gnttab_shared_mfn(t, i))
 
-#define gnttab_status_mfn(t, i) _mfn(__virt_to_mfn((t)->status[i]))
+#define gnttab_status_mfn(t, i) virt_to_mfn((t)->status[i])
 
 #define gnttab_status_gfn(d, t, i) mfn_to_gfn(d, gnttab_status_mfn(t, i))
 
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index 9764362a38..83058fb8d1 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -667,7 +667,7 @@  static inline bool arch_mfn_in_directmap(unsigned long mfn)
 {
     unsigned long eva = min(DIRECTMAP_VIRT_END, HYPERVISOR_VIRT_END);
 
-    return mfn <= (virt_to_mfn(eva - 1) + 1);
+    return mfn <= mfn_x(mfn_add(virt_to_mfn(eva - 1),  1));
 }
 
 int arch_acquire_resource(struct domain *d, unsigned int type,
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index c98d8f5ede..624dbbb949 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -236,8 +236,8 @@  void copy_page_sse2(void *, const void *);
 #define __va(x)             (maddr_to_virt(x))
 
 /* Convert between Xen-heap virtual addresses and machine frame numbers. */
-#define __virt_to_mfn(va)   (virt_to_maddr(va) >> PAGE_SHIFT)
-#define __mfn_to_virt(mfn)  (maddr_to_virt((paddr_t)(mfn) << PAGE_SHIFT))
+#define virt_to_mfn(va)     maddr_to_mfn(virt_to_maddr(va))
+#define mfn_to_virt(mfn)    maddr_to_virt(mfn_to_maddr(mfn))
 
 /* Convert between machine frame numbers and page-info structures. */
 #define mfn_to_page(mfn)    (frame_table + mfn_to_pdx(mfn))
@@ -260,8 +260,6 @@  void copy_page_sse2(void *, const void *);
  * overridden in various source files while underscored versions remain intact.
  */
 #define mfn_valid(mfn)      __mfn_valid(mfn_x(mfn))
-#define virt_to_mfn(va)     __virt_to_mfn(va)
-#define mfn_to_virt(mfn)    __mfn_to_virt(mfn)
 #define virt_to_maddr(va)   __virt_to_maddr((unsigned long)(va))
 #define maddr_to_virt(ma)   __maddr_to_virt((unsigned long)(ma))
 #define maddr_to_page(ma)   __maddr_to_page(ma)
diff --git a/xen/include/xen/domain_page.h b/xen/include/xen/domain_page.h
index ab2be7b719..0314845921 100644
--- a/xen/include/xen/domain_page.h
+++ b/xen/include/xen/domain_page.h
@@ -53,14 +53,14 @@  static inline void *__map_domain_page_global(const struct page_info *pg)
 
 #else /* !CONFIG_DOMAIN_PAGE */
 
-#define map_domain_page(mfn)                __mfn_to_virt(mfn_x(mfn))
+#define map_domain_page(mfn)                mfn_to_virt(mfn)
 #define __map_domain_page(pg)               page_to_virt(pg)
 #define unmap_domain_page(va)               ((void)(va))
-#define domain_page_map_to_mfn(va)          _mfn(virt_to_mfn((unsigned long)(va)))
+#define domain_page_map_to_mfn(va)          virt_to_mfn((unsigned long)(va))
 
 static inline void *map_domain_page_global(mfn_t mfn)
 {
-    return mfn_to_virt(mfn_x(mfn));
+    return mfn_to_virt(mfn);
 }
 
 static inline void *__map_domain_page_global(const struct page_info *pg)