[05/17] xen/x86: Remove the non-typesafe version of pagetable_* helpers

Message ID 20200322161418.31606-6-julien@xen.org (mailing list archive)
State New, archived
Series Bunch of typesafe conversion

Commit Message

Julien Grall March 22, 2020, 4:14 p.m. UTC
From: Julien Grall <jgrall@amazon.com>

Most users of the pagetable_* helpers can use the typesafe version.
Therefore, it is time to convert the callers still using the
non-typesafe version to the typesafe one.

Some parts of the code assume that a pagetable is NULL when the MFN is 0.
Where possible, this is replaced with the helper pagetable_is_null().

There are still some places which test against MFN 0, and it is not
clear whether other unconverted parts of the code rely on that value.
So, for now, the NULL value is not changed to INVALID_MFN.

No functional changes intended.

Signed-off-by: Julien Grall <jgrall@amazon.com>
---
 xen/arch/x86/domain.c          | 18 ++++++++-------
 xen/arch/x86/domctl.c          |  6 ++---
 xen/arch/x86/hvm/vmx/vmcs.c    |  2 +-
 xen/arch/x86/hvm/vmx/vmx.c     |  2 +-
 xen/arch/x86/hvm/vmx/vvmx.c    |  2 +-
 xen/arch/x86/mm.c              | 40 +++++++++++++++++-----------------
 xen/arch/x86/mm/hap/hap.c      |  2 +-
 xen/arch/x86/mm/p2m-ept.c      |  2 +-
 xen/arch/x86/mm/p2m-pt.c       |  4 ++--
 xen/arch/x86/mm/p2m.c          |  2 +-
 xen/arch/x86/mm/shadow/multi.c | 24 ++++++++++----------
 xen/arch/x86/pv/dom0_build.c   | 10 ++++-----
 xen/arch/x86/traps.c           |  6 ++---
 xen/include/asm-x86/page.h     | 19 ++++++++--------
 14 files changed, 70 insertions(+), 69 deletions(-)

Comments

Jan Beulich March 26, 2020, 3:39 p.m. UTC | #1
On 22.03.2020 17:14, julien@xen.org wrote:
> --- a/xen/arch/x86/domain.c
> +++ b/xen/arch/x86/domain.c
> @@ -952,25 +952,27 @@ int arch_set_info_guest(
>      }
>      else
>      {
> -        unsigned long pfn = pagetable_get_pfn(v->arch.guest_table);
> +        mfn_t mfn = pagetable_get_mfn(v->arch.guest_table);
>          bool fail;
>  
>          if ( !compat )
>          {
> -            fail = xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[3];
> +            fail = mfn_to_cr3(mfn) != c.nat->ctrlreg[3];

The patch, besides a few other comments further down, looks fine
on its own, but I don't think it can be acked without seeing the
effects of the adjustments pending to the patch introducing
mfn_to_cr3() and friends.

> @@ -3116,24 +3116,24 @@ int vcpu_destroy_pagetables(struct vcpu *v)
>  
>      /* Free that page if non-zero */
>      do {
> -        if ( mfn )
> +        if ( !mfn_eq(mfn, _mfn(0)) )

I admit I'm not fully certain either, but at first glance

        if ( mfn_x(mfn) )

would seem more in line with the original code to me (and then
also elsewhere).

> @@ -3560,19 +3561,18 @@ long do_mmuext_op(
>              if ( unlikely(rc) )
>                  break;
>  
> -            old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
> +            old_mfn = pagetable_get_mfn(curr->arch.guest_table_user);
>              /*
>               * This is particularly important when getting restarted after the
>               * previous attempt got preempted in the put-old-MFN phase.
>               */
> -            if ( old_mfn == op.arg1.mfn )
> +            if ( mfn_eq(old_mfn, new_mfn) )
>                  break;
>  
> -            if ( op.arg1.mfn != 0 )
> +            if ( !mfn_eq(new_mfn, _mfn(0)) )

At least here I would clearly prefer the old code to be kept.

> @@ -3580,19 +3580,19 @@ long do_mmuext_op(
>                      else if ( rc != -ERESTART )
>                          gdprintk(XENLOG_WARNING,
>                                   "Error %d installing new mfn %" PRI_mfn "\n",
> -                                 rc, op.arg1.mfn);
> +                                 rc, mfn_x(new_mfn));

Here I'm also not sure I see the point of the conversion.

> @@ -2351,11 +2351,11 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
>      ASSERT(mfn_valid(smfn));
>  #endif
>  
> -    if ( pagetable_get_pfn(v->arch.shadow_table[0]) == mfn_x(smfn)
> +    if ( mfn_eq(pagetable_get_mfn(v->arch.shadow_table[0]), smfn)
>  #if (SHADOW_PAGING_LEVELS == 3)
> -         || pagetable_get_pfn(v->arch.shadow_table[1]) == mfn_x(smfn)
> -         || pagetable_get_pfn(v->arch.shadow_table[2]) == mfn_x(smfn)
> -         || pagetable_get_pfn(v->arch.shadow_table[3]) == mfn_x(smfn)
> +         || mfn_eq(pagetable_get_mfn(v->arch.shadow_table[1]), smfn)
> +         || mfn_eq(pagetable_get_mfn(v->arch.shadow_table[2]), smfn)
> +         || mfn_eq(pagetable_get_mfn(v->arch.shadow_table[3]), smfn)
>  #endif
>          )

While here moving the || to their designated places would make
the code look worse overall, ...

> @@ -3707,7 +3707,7 @@ sh_update_linear_entries(struct vcpu *v)
>  
>      /* Don't try to update the monitor table if it doesn't exist */
>      if ( shadow_mode_external(d)
> -         && pagetable_get_pfn(v->arch.monitor_table) == 0 )
> +         && pagetable_is_null(v->arch.monitor_table) )

... could I talk you into moving the && here to the end of the
previous line, as you're touching this anyway?

Also, seeing there's quite a few conversions to pagetable_is_null()
and also seeing that this patch is quite big - could this
conversion be split out?

> @@ -213,17 +214,17 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
>  #ifndef __ASSEMBLY__
>  
>  /* Page-table type. */
> -typedef struct { u64 pfn; } pagetable_t;
> -#define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
> +typedef struct { mfn_t mfn; } pagetable_t;
> +#define PAGETABLE_NULL_MFN      _mfn(0)

I'd prefer to get away without this constant.

Jan
Julien Grall March 28, 2020, 10:52 a.m. UTC | #2
Hi Jan,

On 26/03/2020 15:39, Jan Beulich wrote:
> On 22.03.2020 17:14, julien@xen.org wrote:
>> --- a/xen/arch/x86/domain.c
>> +++ b/xen/arch/x86/domain.c
>> @@ -952,25 +952,27 @@ int arch_set_info_guest(
>>       }
>>       else
>>       {
>> -        unsigned long pfn = pagetable_get_pfn(v->arch.guest_table);
>> +        mfn_t mfn = pagetable_get_mfn(v->arch.guest_table);
>>           bool fail;
>>   
>>           if ( !compat )
>>           {
>> -            fail = xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[3];
>> +            fail = mfn_to_cr3(mfn) != c.nat->ctrlreg[3];
> 
> The patch, besides a few other comments further down, looks fine
> on its own, but I don't think it can be acked without seeing the
> effects of the adjustments pending to the patch introducing
> mfn_to_cr3() and friends.
> 
>> @@ -3116,24 +3116,24 @@ int vcpu_destroy_pagetables(struct vcpu *v)
>>   
>>       /* Free that page if non-zero */
>>       do {
>> -        if ( mfn )
>> +        if ( !mfn_eq(mfn, _mfn(0)) )
> 
> I admit I'm not fully certain either, but at the first glance
> 
>          if ( mfn_x(mfn) )
> 
> would seem more in line with the original code to me (and then
> also elsewhere).

It is doing *exactly* the same thing. The whole point of typesafe is to 
use the typesafe helpers rather than open-coding the test everywhere.

It is also easier to spot any use of MFN 0 within the code, as you can 
grep for "_mfn(0)".

Therefore I will insist on keeping the code as-is.
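
For reference, below is a simplified, self-contained sketch of the typesafe 
MFN wrapper (the real definitions come from TYPE_SAFE() in 
xen/include/xen/typesafe.h and, if I remember correctly, only use the struct 
form in debug builds). It shows that the two tests are equivalent while 
mfn_eq() avoids unwrapping the value:

    /* Simplified stand-in for Xen's TYPE_SAFE(unsigned long, mfn). */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { unsigned long mfn; } mfn_t;

    static inline mfn_t _mfn(unsigned long m)   { return (mfn_t){ m }; }
    static inline unsigned long mfn_x(mfn_t m)  { return m.mfn; }
    static inline bool mfn_eq(mfn_t a, mfn_t b) { return mfn_x(a) == mfn_x(b); }

    int main(void)
    {
        mfn_t mfn = _mfn(0);

        /* Equivalent conditions; the first stays within the typesafe API. */
        printf("%d %d\n", !mfn_eq(mfn, _mfn(0)), mfn_x(mfn) != 0);

        return 0;
    }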

> 
>> @@ -3560,19 +3561,18 @@ long do_mmuext_op(
>>               if ( unlikely(rc) )
>>                   break;
>>   
>> -            old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
>> +            old_mfn = pagetable_get_mfn(curr->arch.guest_table_user);
>>               /*
>>                * This is particularly important when getting restarted after the
>>                * previous attempt got preempted in the put-old-MFN phase.
>>                */
>> -            if ( old_mfn == op.arg1.mfn )
>> +            if ( mfn_eq(old_mfn, new_mfn) )
>>                   break;
>>   
>> -            if ( op.arg1.mfn != 0 )
>> +            if ( !mfn_eq(new_mfn, _mfn(0)) )
> 
> At least here I would clearly prefer the old code to be kept.

See above.

> 
>> @@ -3580,19 +3580,19 @@ long do_mmuext_op(
>>                       else if ( rc != -ERESTART )
>>                           gdprintk(XENLOG_WARNING,
>>                                    "Error %d installing new mfn %" PRI_mfn "\n",
>> -                                 rc, op.arg1.mfn);
>> +                                 rc, mfn_x(new_mfn));
> 
> Here I'm also not sure I see the point of the conversion.

op.arg1.mfn and mfn are technically not the same type. The former is a 
xen_pfn_t, whilst the latter is mfn_t.

In practice they are both unsigned long on x86, so it should be fine to 
use PRI_mfn. However, I think this is an abuse and we should aim to use 
the proper PRI_* for a type.

> 
>> @@ -2351,11 +2351,11 @@ int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
>>       ASSERT(mfn_valid(smfn));
>>   #endif
>>   
>> -    if ( pagetable_get_pfn(v->arch.shadow_table[0]) == mfn_x(smfn)
>> +    if ( mfn_eq(pagetable_get_mfn(v->arch.shadow_table[0]), smfn)
>>   #if (SHADOW_PAGING_LEVELS == 3)
>> -         || pagetable_get_pfn(v->arch.shadow_table[1]) == mfn_x(smfn)
>> -         || pagetable_get_pfn(v->arch.shadow_table[2]) == mfn_x(smfn)
>> -         || pagetable_get_pfn(v->arch.shadow_table[3]) == mfn_x(smfn)
>> +         || mfn_eq(pagetable_get_mfn(v->arch.shadow_table[1]), smfn)
>> +         || mfn_eq(pagetable_get_mfn(v->arch.shadow_table[2]), smfn)
>> +         || mfn_eq(pagetable_get_mfn(v->arch.shadow_table[3]), smfn)
>>   #endif
>>           )
> 
> While here moving the || to their designated places would make
> the code look worse overall, ...
> 
>> @@ -3707,7 +3707,7 @@ sh_update_linear_entries(struct vcpu *v)
>>   
>>       /* Don't try to update the monitor table if it doesn't exist */
>>       if ( shadow_mode_external(d)
>> -         && pagetable_get_pfn(v->arch.monitor_table) == 0 )
>> +         && pagetable_is_null(v->arch.monitor_table) )
> 
> ... could I talk you into moving the && here to the end of the
> previous line, as you're touching this anyway?

I will do.

> 
> Also, seeing there's quite a few conversions to pagetable_is_null()
> and also seeing that this patch is quite big - could this
> conversion be split out?

I will have a look.

> 
>> @@ -213,17 +214,17 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
>>   #ifndef __ASSEMBLY__
>>   
>>   /* Page-table type. */
>> -typedef struct { u64 pfn; } pagetable_t;
>> -#define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
>> +typedef struct { mfn_t mfn; } pagetable_t;
>> +#define PAGETABLE_NULL_MFN      _mfn(0)
> 
> I'd prefer to get away without this constant.
I would rather keep the constant as it makes it easier to understand 
what _mfn(0) means in the context of the pagetable.

Cheers,
Jan Beulich March 30, 2020, 7:52 a.m. UTC | #3
On 28.03.2020 11:52, Julien Grall wrote:
> On 26/03/2020 15:39, Jan Beulich wrote:
>> On 22.03.2020 17:14, julien@xen.org wrote:
>>> @@ -3116,24 +3116,24 @@ int vcpu_destroy_pagetables(struct vcpu *v)
>>>         /* Free that page if non-zero */
>>>       do {
>>> -        if ( mfn )
>>> +        if ( !mfn_eq(mfn, _mfn(0)) )
>>
>> I admit I'm not fully certain either, but at the first glance
>>
>>          if ( mfn_x(mfn) )
>>
>> would seem more in line with the original code to me (and then
>> also elsewhere).
> 
> It is doing *exactly* the same thing. The whole point of typesafe
> is to use the typesafe helpers rather than open-coding the test everywhere.
> 
> It is also easier to spot any use of MFN 0 within the code, as you
> can grep for "_mfn(0)".
> 
> Therefore I will insist on keeping the code as-is.

What I insist on is that readability of the result of such changes also
be kept in mind. The mfn_eq() construct is (I think) clearly less
easy to read and recognize than the simpler alternative suggested.
If you want to avoid mfn_x(), how about introducing (if possible
limited to x86, assuming that MFN 0 has no special meaning on Arm)
mfn_zero()?
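
Such a helper could take either of two shapes; both are hypothetical
sketches reusing the existing mfn_t accessors, and the names are only
placeholders:

    /* As a named constant, so call sites read mfn_eq(mfn, mfn_zero()): */
    static inline mfn_t mfn_zero(void) { return _mfn(0); }

    /* Or as a predicate, so call sites read if ( !mfn_is_zero(mfn) ): */
    static inline bool mfn_is_zero(mfn_t mfn) { return mfn_x(mfn) == 0; }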

>>> @@ -3560,19 +3561,18 @@ long do_mmuext_op(
>>>               if ( unlikely(rc) )
>>>                   break;
>>>   -            old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
>>> +            old_mfn = pagetable_get_mfn(curr->arch.guest_table_user);
>>>               /*
>>>                * This is particularly important when getting restarted after the
>>>                * previous attempt got preempted in the put-old-MFN phase.
>>>                */
>>> -            if ( old_mfn == op.arg1.mfn )
>>> +            if ( mfn_eq(old_mfn, new_mfn) )
>>>                   break;
>>>   -            if ( op.arg1.mfn != 0 )
>>> +            if ( !mfn_eq(new_mfn, _mfn(0)) )
>>
>> At least here I would clearly prefer the old code to be kept.
> 
> See above.

I don't agree - here you're evaluating an aspect of the public
interface. MFN 0 internally having a special meaning is, while
connected to this aspect, still an implementation detail.

>>> @@ -3580,19 +3580,19 @@ long do_mmuext_op(
>>>                       else if ( rc != -ERESTART )
>>>                           gdprintk(XENLOG_WARNING,
>>>                                    "Error %d installing new mfn %" PRI_mfn "\n",
>>> -                                 rc, op.arg1.mfn);
>>> +                                 rc, mfn_x(new_mfn));
>>
>> Here I'm also not sure I see the point of the conversion.
> 
> op.arg1.mfn and mfn are technically not the same type. The
> former is a xen_pfn_t, whilst the latter is mfn_t.
> 
> In practice they are both unsigned long on x86, so it should
> be fine to use PRI_mfn. However, I think this is an abuse
> and we should aim to use the proper PRI_* for a type.

I'd be fine with switching to PRI_xen_pfn here, yes. But
especially with the "not the same type" argument, what should be
logged is imo what was specified, not what we converted it to.

>>> @@ -213,17 +214,17 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
>>>   #ifndef __ASSEMBLY__
>>>     /* Page-table type. */
>>> -typedef struct { u64 pfn; } pagetable_t;
>>> -#define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
>>> +typedef struct { mfn_t mfn; } pagetable_t;
>>> +#define PAGETABLE_NULL_MFN      _mfn(0)
>>
>> I'd prefer to get away without this constant.
> I would rather keep the constant as it makes easier to
> understand what _mfn(0) means in the context of the pagetable.

If this was used outside of the accessor definitions, I'd
probably agree. But the accessor definitions exist specifically
to abstract away such things from use sites. Hence, bike-
shedding or not, if Andrew was clearly agreeing with your view,
I'd accept it. If he's indifferent, I'd prefer the #define to
be dropped.

Jan
Julien Grall April 18, 2020, 10:23 a.m. UTC | #4
Hi,

On 30/03/2020 08:52, Jan Beulich wrote:
> On 28.03.2020 11:52, Julien Grall wrote:
>> On 26/03/2020 15:39, Jan Beulich wrote:
>>> On 22.03.2020 17:14, julien@xen.org wrote:
>>>> @@ -3116,24 +3116,24 @@ int vcpu_destroy_pagetables(struct vcpu *v)
>>>>          /* Free that page if non-zero */
>>>>        do {
>>>> -        if ( mfn )
>>>> +        if ( !mfn_eq(mfn, _mfn(0)) )
>>>
>>> I admit I'm not fully certain either, but at the first glance
>>>
>>>           if ( mfn_x(mfn) )
>>>
>>> would seem more in line with the original code to me (and then
>>> also elsewhere).
>>
>> It is doing *exactly* the same thing. The whole point of typesafe
>> is to use the typesafe helpers rather than open-coding the test everywhere.
>>
>> It is also easier to spot any use of MFN 0 within the code, as you
>> can grep for "_mfn(0)".
>>
>> Therefore I will insist on keeping the code as-is.
> 
> What I insit on is that readability of the result of such changes be
> also kept in mind. The mfn_eq() construct is (I think) clearly less
> easy to read and recognize than the simpler alternative suggested.

If mfn_eq() is less clear, then where do you draw the line on when the 
macro should or should not be used?

> If you want to avoid mfn_x(), how about introducing (if possible
> limited to x86, assuming that MFN 0 has no special meaning on Arm)
> mfn_zero()?

Zero has no special meaning on Arm, so we could limit it to x86.

> 
>>>> @@ -3560,19 +3561,18 @@ long do_mmuext_op(
>>>>                if ( unlikely(rc) )
>>>>                    break;
>>>>    -            old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
>>>> +            old_mfn = pagetable_get_mfn(curr->arch.guest_table_user);
>>>>                /*
>>>>                 * This is particularly important when getting restarted after the
>>>>                 * previous attempt got preempted in the put-old-MFN phase.
>>>>                 */
>>>> -            if ( old_mfn == op.arg1.mfn )
>>>> +            if ( mfn_eq(old_mfn, new_mfn) )
>>>>                    break;
>>>>    -            if ( op.arg1.mfn != 0 )
>>>> +            if ( !mfn_eq(new_mfn, _mfn(0)) )
>>>
>>> At least here I would clearly prefer the old code to be kept.
>>
>> See above.
> 
> I don't agree - here you're evaluating an aspect of the public
> interface. MFN 0 internally having a special meaning is, while
> connected to this aspect, still an implementation detail.

Fair enough.

> 
>>>> @@ -3580,19 +3580,19 @@ long do_mmuext_op(
>>>>                        else if ( rc != -ERESTART )
>>>>                            gdprintk(XENLOG_WARNING,
>>>>                                     "Error %d installing new mfn %" PRI_mfn "\n",
>>>> -                                 rc, op.arg1.mfn);
>>>> +                                 rc, mfn_x(new_mfn));
>>>
>>> Here I'm also not sure I see the point of the conversion.
>>
>> op.arg1.mfn and mfn are technically not the same type. The
>> former is a xen_pfn_t, whilst the latter is mfn_t.
>>
>> In practice they are both unsigned long on x86, so it should
>> be fine to use PRI_mfn. However, I think this is an abuse
>> and we should aim to use the proper PRI_* for a type.
> 
> I'd be fine with switching to PRI_xen_pfn here, yes. But
> especially with the "not the same type" argument what should
> be logged is imo what was specified, not what we converted it
> to.

Fair point. I will switch back to op.arg1.mfn.
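
I.e. the hunk would then keep the guest-provided value, roughly like this 
(a sketch, assuming PRI_xen_pfn is the matching format specifier for 
xen_pfn_t as defined in the public headers):

                     else if ( rc != -ERESTART )
                         gdprintk(XENLOG_WARNING,
                                  "Error %d installing new mfn %" PRI_xen_pfn "\n",
                                  rc, op.arg1.mfn);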

> 
>>>> @@ -213,17 +214,17 @@ static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
>>>>    #ifndef __ASSEMBLY__
>>>>      /* Page-table type. */
>>>> -typedef struct { u64 pfn; } pagetable_t;
>>>> -#define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
>>>> +typedef struct { mfn_t mfn; } pagetable_t;
>>>> +#define PAGETABLE_NULL_MFN      _mfn(0)
>>>
>>> I'd prefer to get away without this constant.
>> I would rather keep the constant as it makes it easier to
>> understand what _mfn(0) means in the context of the pagetable.
> 
> If this was used outside of the accessor definitions, I'd
> probably agree. But the accessor definitions exist specifically
> to abstract away such things from use sites. Hence, bike-
> shedding or not, if Andrew was clearly agreeing with your view,
> I'd accept it. If he's indifferent, I'd prefer the #define to
> be dropped.

Andrew, do you have any opinion?

Cheers,
Jan Beulich April 20, 2020, 9:16 a.m. UTC | #5
On 18.04.2020 12:23, Julien Grall wrote:
> On 30/03/2020 08:52, Jan Beulich wrote:
>> On 28.03.2020 11:52, Julien Grall wrote:
>>> On 26/03/2020 15:39, Jan Beulich wrote:
>>>> On 22.03.2020 17:14, julien@xen.org wrote:
>>>>> @@ -3116,24 +3116,24 @@ int vcpu_destroy_pagetables(struct vcpu *v)
>>>>>          /* Free that page if non-zero */
>>>>>        do {
>>>>> -        if ( mfn )
>>>>> +        if ( !mfn_eq(mfn, _mfn(0)) )
>>>>
>>>> I admit I'm not fully certain either, but at the first glance
>>>>
>>>>           if ( mfn_x(mfn) )
>>>>
>>>> would seem more in line with the original code to me (and then
>>>> also elsewhere).
>>>
>>> It is doing *exactly* the same thing. The whole point of typesafe
>>> is to use the typesafe helpers rather than open-coding the test everywhere.
>>>
>>> It is also easier to spot any use of MFN 0 within the code, as you
>>> can grep for "_mfn(0)".
>>>
>>> Therefore I will insist on keeping the code as-is.
>>
>> What I insist on is that readability of the result of such changes also
>> be kept in mind. The mfn_eq() construct is (I think) clearly less
>> easy to read and recognize than the simpler alternative suggested.
> 
> If mfn_eq() is less clear, then where do you draw the line on when the
> macro should or should not be used?

I'm afraid there may not be a clear line to draw until everything
got converted. I do seem to recall though that, perhaps in a
different context, Andrew recently agreed with my view here (Andrew,
please correct me if I'm wrong). It being a fuzzy thing, I guess
maintainers get to judge ...

Jan
Julien Grall April 20, 2020, 10:10 a.m. UTC | #6
Hi,

On 20/04/2020 10:16, Jan Beulich wrote:
> On 18.04.2020 12:23, Julien Grall wrote:
>> On 30/03/2020 08:52, Jan Beulich wrote:
>>> On 28.03.2020 11:52, Julien Grall wrote:
>>>> On 26/03/2020 15:39, Jan Beulich wrote:
>>>>> On 22.03.2020 17:14, julien@xen.org wrote:
>>>>>> @@ -3116,24 +3116,24 @@ int vcpu_destroy_pagetables(struct vcpu *v)
>>>>>>           /* Free that page if non-zero */
>>>>>>         do {
>>>>>> -        if ( mfn )
>>>>>> +        if ( !mfn_eq(mfn, _mfn(0)) )
>>>>>
>>>>> I admit I'm not fully certain either, but at the first glance
>>>>>
>>>>>            if ( mfn_x(mfn) )
>>>>>
>>>>> would seem more in line with the original code to me (and then
>>>>> also elsewhere).
>>>>
>>>> It is doing *exactly* the same thing. The whole point of typesafe
>>>> is to use the typesafe helpers rather than open-coding the test everywhere.
>>>>
>>>> It is also easier to spot any use of MFN 0 within the code, as you
>>>> can grep for "_mfn(0)".
>>>>
>>>> Therefore I will insist on keeping the code as-is.
>>>
>>> What I insist on is that readability of the result of such changes also
>>> be kept in mind. The mfn_eq() construct is (I think) clearly less
>>> easy to read and recognize than the simpler alternative suggested.
>>
>> If mfn_eq() is less clear, then where do you draw the line on when the
>> macro should or should not be used?
> 
> I'm afraid there may not be a clear line to draw until everything
> got converted.

I am sorry but this doesn't add up. Here you say that we can't have a 
clear line to draw until everything is converted but...

> I do seem to recall though that, perhaps in a
> different context, Andrew recently agreed with my view here (Andrew,
> please correct me if I'm wrong). It being a fuzzy thing, I guess
> maintainers get to judge ...

... here you say the maintainers get to decide when to use mfn_eq() (or 
other typesafe construction). So basically, we would never be able to 
fully convert the code and therefore never draw a line.

As I am trying to convert x86 to use typesafe, I would like a bit more 
guidance on your expectations for typesafe. Can you clarify?

Cheers,
Jan Beulich April 20, 2020, 12:14 p.m. UTC | #7
On 20.04.2020 12:10, Julien Grall wrote:
> Hi,
> 
> On 20/04/2020 10:16, Jan Beulich wrote:
>> On 18.04.2020 12:23, Julien Grall wrote:
>>> On 30/03/2020 08:52, Jan Beulich wrote:
>>>> On 28.03.2020 11:52, Julien Grall wrote:
>>>>> On 26/03/2020 15:39, Jan Beulich wrote:
>>>>>> On 22.03.2020 17:14, julien@xen.org wrote:
>>>>>>> @@ -3116,24 +3116,24 @@ int vcpu_destroy_pagetables(struct vcpu *v)
>>>>>>>           /* Free that page if non-zero */
>>>>>>>         do {
>>>>>>> -        if ( mfn )
>>>>>>> +        if ( !mfn_eq(mfn, _mfn(0)) )
>>>>>>
>>>>>> I admit I'm not fully certain either, but at the first glance
>>>>>>
>>>>>>            if ( mfn_x(mfn) )
>>>>>>
>>>>>> would seem more in line with the original code to me (and then
>>>>>> also elsewhere).
>>>>>
>>>>> It is doing *exactly* the same thing. The whole point of typesafe
>>>>> is to use the typesafe helpers rather than open-coding the test everywhere.
>>>>>
>>>>> It is also easier to spot any use of MFN 0 within the code, as you
>>>>> can grep for "_mfn(0)".
>>>>>
>>>>> Therefore I will insist on keeping the code as-is.
>>>>
>>>> What I insist on is that readability of the result of such changes also
>>>> be kept in mind. The mfn_eq() construct is (I think) clearly less
>>>> easy to read and recognize than the simpler alternative suggested.
>>>
>>> If mfn_eq() is less clear, then where do you draw the line on when the
>>> macro should or should not be used?
>>
>> I'm afraid there may not be a clear line to draw until everything
>> got converted.
> 
> I am sorry but this doesn't add up. Here you say that we can't have
> a clear line to draw until everything is converted but...
> 
>> I do seem to recall though that, perhaps in a
>> different context, Andrew recently agreed with my view here (Andrew,
>> please correct me if I'm wrong). It being a fuzzy thing, I guess
>> maintainers get to judge ...
> 
> ... here you say the maintainers get to decide when to use mfn_eq()
> (or other typesafe construction). So basically, we would never be
> able to fully convert the code and therefore never draw a line.

Why? Eventually both sides of an mfn_eq() will be of type mfn_t. And
in the specific case at hand, even with my alternative suggestion no
further change would be needed down the road. Type safety is for
things like function argument passing, assignments, and the like. A
leaf expression like "if ( mfn_x() )" is not type-unsafe in any way.
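
To illustrate the distinction with a small sketch (reusing the simplified
struct wrapper shown earlier in the thread; the consumer function is made up):

    static void install_root(mfn_t m) { (void)m; }   /* made-up consumer */

    static void example(mfn_t mfn)
    {
        install_root(mfn);               /* argument passing: type-checked */
        /* install_root(mfn_x(mfn)); */  /* would not compile with the struct form */

        if ( mfn_x(mfn) )                /* leaf expression: no type risk */
            install_root(mfn);
    }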

Jan

Patch

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 15750ce210..18d8fda9bd 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -952,25 +952,27 @@  int arch_set_info_guest(
     }
     else
     {
-        unsigned long pfn = pagetable_get_pfn(v->arch.guest_table);
+        mfn_t mfn = pagetable_get_mfn(v->arch.guest_table);
         bool fail;
 
         if ( !compat )
         {
-            fail = xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[3];
+            fail = mfn_to_cr3(mfn) != c.nat->ctrlreg[3];
             if ( pagetable_is_null(v->arch.guest_table_user) )
                 fail |= c.nat->ctrlreg[1] || !(flags & VGCF_in_kernel);
             else
             {
-                pfn = pagetable_get_pfn(v->arch.guest_table_user);
-                fail |= xen_pfn_to_cr3(pfn) != c.nat->ctrlreg[1];
+                mfn = pagetable_get_mfn(v->arch.guest_table_user);
+                fail |= mfn_to_cr3(mfn) != c.nat->ctrlreg[1];
             }
-        } else {
-            l4_pgentry_t *l4tab = map_domain_page(_mfn(pfn));
+        }
+        else
+        {
+            l4_pgentry_t *l4tab = map_domain_page(mfn);
 
-            pfn = l4e_get_pfn(*l4tab);
+            mfn = l4e_get_mfn(*l4tab);
             unmap_domain_page(l4tab);
-            fail = compat_pfn_to_cr3(pfn) != c.cmp->ctrlreg[3];
+            fail = compat_pfn_to_cr3(mfn_x(mfn)) != c.cmp->ctrlreg[3];
         }
 
         for ( i = 0; i < ARRAY_SIZE(v->arch.pv.gdt_frames); ++i )
diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c
index ed86762fa6..02596c3810 100644
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -1611,11 +1611,11 @@  void arch_get_info_guest(struct vcpu *v, vcpu_guest_context_u c)
 
         if ( !compat )
         {
-            c.nat->ctrlreg[3] = xen_pfn_to_cr3(
-                pagetable_get_pfn(v->arch.guest_table));
+            c.nat->ctrlreg[3] = mfn_to_cr3(
+                pagetable_get_mfn(v->arch.guest_table));
             c.nat->ctrlreg[1] =
                 pagetable_is_null(v->arch.guest_table_user) ? 0
-                : xen_pfn_to_cr3(pagetable_get_pfn(v->arch.guest_table_user));
+                : mfn_to_cr3(pagetable_get_mfn(v->arch.guest_table_user));
         }
         else
         {
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 4c23645454..1f39367253 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1290,7 +1290,7 @@  static int construct_vmcs(struct vcpu *v)
         struct p2m_domain *p2m = p2m_get_hostp2m(d);
         struct ept_data *ept = &p2m->ept;
 
-        ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
+        ept->mfn = mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)));
         __vmwrite(EPT_POINTER, ept->eptp);
 
         __vmwrite(HOST_PAT, XEN_MSR_PAT);
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index d265ed46ad..a1e3a19c0a 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -2110,7 +2110,7 @@  static void vmx_vcpu_update_eptp(struct vcpu *v)
         p2m = p2m_get_hostp2m(d);
 
     ept = &p2m->ept;
-    ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
+    ept->mfn = mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)));
 
     vmx_vmcs_enter(v);
 
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index f049920196..84b47ef277 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -1149,7 +1149,7 @@  static uint64_t get_shadow_eptp(struct vcpu *v)
     struct p2m_domain *p2m = p2m_get_nestedp2m(v);
     struct ept_data *ept = &p2m->ept;
 
-    ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
+    ept->mfn = mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     return ept->eptp;
 }
 
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 7c0f81759a..aa0bf3d0ee 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3085,7 +3085,7 @@  int put_old_guest_table(struct vcpu *v)
 
 int vcpu_destroy_pagetables(struct vcpu *v)
 {
-    unsigned long mfn = pagetable_get_pfn(v->arch.guest_table);
+    mfn_t mfn = pagetable_get_mfn(v->arch.guest_table);
     struct page_info *page = NULL;
     int rc = put_old_guest_table(v);
     bool put_guest_table_user = false;
@@ -3102,9 +3102,9 @@  int vcpu_destroy_pagetables(struct vcpu *v)
      */
     if ( is_pv_32bit_vcpu(v) )
     {
-        l4_pgentry_t *l4tab = map_domain_page(_mfn(mfn));
+        l4_pgentry_t *l4tab = map_domain_page(mfn);
 
-        mfn = l4e_get_pfn(*l4tab);
+        mfn = l4e_get_mfn(*l4tab);
         l4e_write(l4tab, l4e_empty());
         unmap_domain_page(l4tab);
     }
@@ -3116,24 +3116,24 @@  int vcpu_destroy_pagetables(struct vcpu *v)
 
     /* Free that page if non-zero */
     do {
-        if ( mfn )
+        if ( !mfn_eq(mfn, _mfn(0)) )
         {
-            page = mfn_to_page(_mfn(mfn));
+            page = mfn_to_page(mfn);
             if ( paging_mode_refcounts(v->domain) )
                 put_page(page);
             else
                 rc = put_page_and_type_preemptible(page);
-            mfn = 0;
+            mfn = _mfn(0);
         }
 
         if ( !rc && put_guest_table_user )
         {
             /* Drop ref to guest_table_user (from MMUEXT_NEW_USER_BASEPTR) */
-            mfn = pagetable_get_pfn(v->arch.guest_table_user);
+            mfn = pagetable_get_mfn(v->arch.guest_table_user);
             v->arch.guest_table_user = pagetable_null();
             put_guest_table_user = false;
         }
-    } while ( mfn );
+    } while ( !mfn_eq(mfn, _mfn(0)) );
 
     /*
      * If a "put" operation was interrupted, finish things off in
@@ -3551,7 +3551,8 @@  long do_mmuext_op(
             break;
 
         case MMUEXT_NEW_USER_BASEPTR: {
-            unsigned long old_mfn;
+            mfn_t old_mfn;
+            mfn_t new_mfn = _mfn(op.arg1.mfn);
 
             if ( unlikely(currd != pg_owner) )
                 rc = -EPERM;
@@ -3560,19 +3561,18 @@  long do_mmuext_op(
             if ( unlikely(rc) )
                 break;
 
-            old_mfn = pagetable_get_pfn(curr->arch.guest_table_user);
+            old_mfn = pagetable_get_mfn(curr->arch.guest_table_user);
             /*
              * This is particularly important when getting restarted after the
              * previous attempt got preempted in the put-old-MFN phase.
              */
-            if ( old_mfn == op.arg1.mfn )
+            if ( mfn_eq(old_mfn, new_mfn) )
                 break;
 
-            if ( op.arg1.mfn != 0 )
+            if ( !mfn_eq(new_mfn, _mfn(0)) )
             {
-                rc = get_page_and_type_from_mfn(
-                    _mfn(op.arg1.mfn), PGT_root_page_table, currd, PTF_preemptible);
-
+                rc = get_page_and_type_from_mfn(new_mfn, PGT_root_page_table,
+                                                currd, PTF_preemptible);
                 if ( unlikely(rc) )
                 {
                     if ( rc == -EINTR )
@@ -3580,19 +3580,19 @@  long do_mmuext_op(
                     else if ( rc != -ERESTART )
                         gdprintk(XENLOG_WARNING,
                                  "Error %d installing new mfn %" PRI_mfn "\n",
-                                 rc, op.arg1.mfn);
+                                 rc, mfn_x(new_mfn));
                     break;
                 }
 
                 if ( VM_ASSIST(currd, m2p_strict) )
-                    zap_ro_mpt(_mfn(op.arg1.mfn));
+                    zap_ro_mpt(new_mfn);
             }
 
-            curr->arch.guest_table_user = pagetable_from_pfn(op.arg1.mfn);
+            curr->arch.guest_table_user = pagetable_from_mfn(new_mfn);
 
-            if ( old_mfn != 0 )
+            if ( !mfn_eq(old_mfn, _mfn(0)) )
             {
-                page = mfn_to_page(_mfn(old_mfn));
+                page = mfn_to_page(old_mfn);
 
                 switch ( rc = put_page_and_type_preemptible(page) )
                 {
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index a6d5e39b02..051e92169a 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -394,7 +394,7 @@  static mfn_t hap_make_monitor_table(struct vcpu *v)
     l4_pgentry_t *l4e;
     mfn_t m4mfn;
 
-    ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
+    ASSERT(pagetable_is_null(v->arch.monitor_table));
 
     if ( (pg = hap_alloc(d)) == NULL )
         goto oom;
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index eb0f0edfef..346696e469 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1366,7 +1366,7 @@  void p2m_init_altp2m_ept(struct domain *d, unsigned int i)
 
     p2m->ept.ad = hostp2m->ept.ad;
     ept = &p2m->ept;
-    ept->mfn = pagetable_get_pfn(p2m_get_pagetable(p2m));
+    ept->mfn = mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m)));
     d->arch.altp2m_eptp[array_index_nospec(i, MAX_EPTP)] = ept->eptp;
 }
 
diff --git a/xen/arch/x86/mm/p2m-pt.c b/xen/arch/x86/mm/p2m-pt.c
index eb66077496..cccb06c26e 100644
--- a/xen/arch/x86/mm/p2m-pt.c
+++ b/xen/arch/x86/mm/p2m-pt.c
@@ -867,7 +867,7 @@  static void p2m_pt_change_entry_type_global(struct p2m_domain *p2m,
     unsigned long gfn = 0;
     unsigned int i, changed;
 
-    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) == 0 )
+    if ( pagetable_is_null(p2m_get_pagetable(p2m)) )
         return;
 
     ASSERT(hap_enabled(p2m->domain));
@@ -950,7 +950,7 @@  long p2m_pt_audit_p2m(struct p2m_domain *p2m)
     ASSERT(pod_locked_by_me(p2m));
 
     /* Audit part one: walk the domain's p2m table, checking the entries. */
-    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
+    if ( !pagetable_is_null(p2m_get_pagetable(p2m)) )
     {
         l2_pgentry_t *l2e;
         l1_pgentry_t *l1e;
diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c
index 9f51370327..45b4b784d3 100644
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -702,7 +702,7 @@  int p2m_alloc_table(struct p2m_domain *p2m)
         return -EINVAL;
     }
 
-    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
+    if ( !pagetable_is_null(p2m_get_pagetable(p2m)) )
     {
         P2M_ERROR("p2m already allocated for this domain\n");
         p2m_unlock(p2m);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index b6afc0fba4..5751dae344 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -1520,7 +1520,7 @@  sh_make_monitor_table(struct vcpu *v)
 {
     struct domain *d = v->domain;
 
-    ASSERT(pagetable_get_pfn(v->arch.monitor_table) == 0);
+    ASSERT(pagetable_is_null(v->arch.monitor_table));
 
     /* Guarantee we can get the memory we need */
     shadow_prealloc(d, SH_type_monitor_table, CONFIG_PAGING_LEVELS);
@@ -2351,11 +2351,11 @@  int sh_safe_not_to_sync(struct vcpu *v, mfn_t gl1mfn)
     ASSERT(mfn_valid(smfn));
 #endif
 
-    if ( pagetable_get_pfn(v->arch.shadow_table[0]) == mfn_x(smfn)
+    if ( mfn_eq(pagetable_get_mfn(v->arch.shadow_table[0]), smfn)
 #if (SHADOW_PAGING_LEVELS == 3)
-         || pagetable_get_pfn(v->arch.shadow_table[1]) == mfn_x(smfn)
-         || pagetable_get_pfn(v->arch.shadow_table[2]) == mfn_x(smfn)
-         || pagetable_get_pfn(v->arch.shadow_table[3]) == mfn_x(smfn)
+         || mfn_eq(pagetable_get_mfn(v->arch.shadow_table[1]), smfn)
+         || mfn_eq(pagetable_get_mfn(v->arch.shadow_table[2]), smfn)
+         || mfn_eq(pagetable_get_mfn(v->arch.shadow_table[3]), smfn)
 #endif
         )
         return 0;
@@ -3707,7 +3707,7 @@  sh_update_linear_entries(struct vcpu *v)
 
     /* Don't try to update the monitor table if it doesn't exist */
     if ( shadow_mode_external(d)
-         && pagetable_get_pfn(v->arch.monitor_table) == 0 )
+         && pagetable_is_null(v->arch.monitor_table) )
         return;
 
 #if SHADOW_PAGING_LEVELS == 4
@@ -3722,7 +3722,7 @@  sh_update_linear_entries(struct vcpu *v)
         if ( v == current )
         {
             __linear_l4_table[l4_linear_offset(SH_LINEAR_PT_VIRT_START)] =
-                l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
+                l4e_from_mfn(pagetable_get_mfn(v->arch.shadow_table[0]),
                              __PAGE_HYPERVISOR_RW);
         }
         else
@@ -3730,7 +3730,7 @@  sh_update_linear_entries(struct vcpu *v)
             l4_pgentry_t *ml4e;
             ml4e = map_domain_page(pagetable_get_mfn(v->arch.monitor_table));
             ml4e[l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
-                l4e_from_pfn(pagetable_get_pfn(v->arch.shadow_table[0]),
+                l4e_from_mfn(pagetable_get_mfn(v->arch.shadow_table[0]),
                              __PAGE_HYPERVISOR_RW);
             unmap_domain_page(ml4e);
         }
@@ -3964,15 +3964,15 @@  sh_update_cr3(struct vcpu *v, int do_locking, bool noflush)
     {
         ASSERT(shadow_mode_external(d));
         if ( hvm_paging_enabled(v) )
-            ASSERT(pagetable_get_pfn(v->arch.guest_table));
+            ASSERT(!pagetable_is_null(v->arch.guest_table));
         else
-            ASSERT(v->arch.guest_table.pfn
-                   == d->arch.paging.shadow.unpaged_pagetable.pfn);
+            ASSERT(mfn_eq(pagetable_get_mfn(v->arch.guest_table),
+                          pagetable_get_mfn(d->arch.paging.shadow.unpaged_pagetable)));
     }
 #endif
 
     SHADOW_PRINTK("%pv guest_table=%"PRI_mfn"\n",
-                  v, (unsigned long)pagetable_get_pfn(v->arch.guest_table));
+                  v, mfn_x(pagetable_get_mfn(v->arch.guest_table)));
 
 #if GUEST_PAGING_LEVELS == 4
     if ( !(v->arch.flags & TF_kernel_mode) )
diff --git a/xen/arch/x86/pv/dom0_build.c b/xen/arch/x86/pv/dom0_build.c
index 30846b5f97..8abd5d255c 100644
--- a/xen/arch/x86/pv/dom0_build.c
+++ b/xen/arch/x86/pv/dom0_build.c
@@ -93,14 +93,14 @@  static __init void mark_pv_pt_pages_rdonly(struct domain *d,
     }
 }
 
-static __init void setup_pv_physmap(struct domain *d, unsigned long pgtbl_pfn,
+static __init void setup_pv_physmap(struct domain *d, mfn_t pgtbl_mfn,
                                     unsigned long v_start, unsigned long v_end,
                                     unsigned long vphysmap_start,
                                     unsigned long vphysmap_end,
                                     unsigned long nr_pages)
 {
     struct page_info *page = NULL;
-    l4_pgentry_t *pl4e, *l4start = map_domain_page(_mfn(pgtbl_pfn));
+    l4_pgentry_t *pl4e, *l4start = map_domain_page(pgtbl_mfn);
     l3_pgentry_t *pl3e = NULL;
     l2_pgentry_t *pl2e = NULL;
     l1_pgentry_t *pl1e = NULL;
@@ -760,11 +760,9 @@  int __init dom0_construct_pv(struct domain *d,
 
     /* Set up the phys->machine table if not part of the initial mapping. */
     if ( parms.p2m_base != UNSET_ADDR )
-    {
-        pfn = pagetable_get_pfn(v->arch.guest_table);
-        setup_pv_physmap(d, pfn, v_start, v_end, vphysmap_start, vphysmap_end,
+        setup_pv_physmap(d, pagetable_get_mfn(v->arch.guest_table),
+                         v_start, v_end, vphysmap_start, vphysmap_end,
                          nr_pages);
-    }
 
     /* Write the phys->machine and machine->phys table entries. */
     for ( pfn = 0; pfn < count; pfn++ )
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 4aa7c35be4..04a3ebc0a2 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -247,12 +247,12 @@  static void compat_show_guest_stack(struct vcpu *v,
     if ( v != current )
     {
         struct vcpu *vcpu;
-        unsigned long mfn;
+        mfn_t mfn;
 
         ASSERT(guest_kernel_mode(v, regs));
-        mfn = read_cr3() >> PAGE_SHIFT;
+        mfn = cr3_to_mfn(read_cr3());
         for_each_vcpu( v->domain, vcpu )
-            if ( pagetable_get_pfn(vcpu->arch.guest_table) == mfn )
+            if ( mfn_eq(pagetable_get_mfn(vcpu->arch.guest_table), mfn) )
                 break;
         if ( !vcpu )
         {
diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h
index 624dbbb949..377ba14f6e 100644
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -18,6 +18,7 @@ 
 #ifndef __ASSEMBLY__
 # include <asm/types.h>
 # include <xen/lib.h>
+# include <xen/mm_types.h>
 #endif
 
 #include <asm/x86_64/page.h>
@@ -213,17 +214,17 @@  static inline l4_pgentry_t l4e_from_paddr(paddr_t pa, unsigned int flags)
 #ifndef __ASSEMBLY__
 
 /* Page-table type. */
-typedef struct { u64 pfn; } pagetable_t;
-#define pagetable_get_paddr(x)  ((paddr_t)(x).pfn << PAGE_SHIFT)
+typedef struct { mfn_t mfn; } pagetable_t;
+#define PAGETABLE_NULL_MFN      _mfn(0)
+
+#define pagetable_get_paddr(x)  mfn_to_maddr((x).mfn)
 #define pagetable_get_page(x)   mfn_to_page(pagetable_get_mfn(x))
-#define pagetable_get_pfn(x)    ((x).pfn)
-#define pagetable_get_mfn(x)    _mfn(((x).pfn))
-#define pagetable_is_null(x)    ((x).pfn == 0)
-#define pagetable_from_pfn(pfn) ((pagetable_t) { (pfn) })
-#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn_x(mfn) })
+#define pagetable_get_mfn(x)    ((x).mfn)
+#define pagetable_is_null(x)    mfn_eq((x).mfn, PAGETABLE_NULL_MFN)
+#define pagetable_from_mfn(mfn) ((pagetable_t) { mfn })
 #define pagetable_from_page(pg) pagetable_from_mfn(page_to_mfn(pg))
-#define pagetable_from_paddr(p) pagetable_from_pfn((p)>>PAGE_SHIFT)
-#define pagetable_null()        pagetable_from_pfn(0)
+#define pagetable_from_paddr(p) pagetable_from_mfn(maddr_to_mfn(p))
+#define pagetable_null()        pagetable_from_mfn(PAGETABLE_NULL_MFN)
 
 void clear_page_sse2(void *);
 void copy_page_sse2(void *, const void *);