
[1/4] x86/PoD: simplify / improve p2m_pod_cache_add()

Message ID 2525b63f-f666-2430-2c22-b1b7b0d5d7f0@suse.com
State Superseded
Series x86/mm: address observations made while working on XSA-388

Commit Message

Jan Beulich Dec. 1, 2021, 10:53 a.m. UTC
Avoid recurring MFN -> page or page -> MFN translations. Drop the pretty
pointless local variable "p". Make sure the MFN logged in a debugging
error message is actually the offending one. Adjust style.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

Comments

Andrew Cooper Dec. 1, 2021, 12:01 p.m. UTC | #1
On 01/12/2021 10:53, Jan Beulich wrote:
> Avoid recurring MFN -> page or page -> MFN translations. Drop the pretty
> pointless local variable "p". Make sure the MFN logged in a debugging
> error message is actually the offending one. Adjust style.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/xen/arch/x86/mm/p2m-pod.c
> +++ b/xen/arch/x86/mm/p2m-pod.c
> @@ -58,14 +58,10 @@ p2m_pod_cache_add(struct p2m_domain *p2m
>                    unsigned int order)
>  {
>      unsigned long i;
> -    struct page_info *p;
>      struct domain *d = p2m->domain;
> +    mfn_t mfn = page_to_mfn(page);
>  
>  #ifndef NDEBUG
> -    mfn_t mfn;
> -
> -    mfn = page_to_mfn(page);
> -
>      /* Check to make sure this is a contiguous region */
>      if ( mfn_x(mfn) & ((1UL << order) - 1) )
>      {
> @@ -74,17 +70,14 @@ p2m_pod_cache_add(struct p2m_domain *p2m
>          return -1;
>      }
>  
> -    for ( i = 0; i < 1UL << order ; i++)
> +    for ( i = 0; i < (1UL << order); i++)
>      {
> -        struct domain * od;
> +        const struct domain *od = page_get_owner(page + i);
>  
> -        p = mfn_to_page(mfn_add(mfn, i));
> -        od = page_get_owner(p);
>          if ( od != d )
>          {
> -            printk("%s: mfn %lx expected owner d%d, got owner d%d!\n",
> -                   __func__, mfn_x(mfn), d->domain_id,
> -                   od ? od->domain_id : -1);
> +            printk("%s: mfn %lx owner: expected %pd, got %pd!\n",
> +                   __func__, mfn_x(mfn) + i, d, od);

Take the opportunity to drop the superfluous punctuation?


Looking through this code, no callers check for any errors, and the only
failure paths are in a debug region.  The function really ought to
become void, or at the very least, switch to -EINVAL to avoid aliasing
-EPERM.

Furthermore, in all(?) cases that it fails, we'll leak the
domain-allocated page, which at the very best means the VM is going to
hit memory limit problems.  i.e. nothing good can come of it.

Both failures are internal memory handling errors in Xen, so the least
invasive option is probably to switch to ASSERT() (for the alignment
check), and ASSERT_UNREACHABLE() here.  That also addresses the issue
that these printk()s aren't ratelimited and are used within several
loops.
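
As a sketch only (untested, just to illustrate the suggestion; using
-EINVAL per the above, since a plain -1 aliases -EPERM):

    ASSERT(!(mfn_x(mfn) & ((1UL << order) - 1)));

    for ( i = 0; i < (1UL << order); i++ )
    {
        const struct domain *od = page_get_owner(page + i);

        if ( od != d )
        {
            /* Internal error; no unratelimited printk() in a loop. */
            ASSERT_UNREACHABLE();
            return -EINVAL;
        }
    }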

>              return -1;
>          }
>      }
> @@ -98,16 +91,12 @@ p2m_pod_cache_add(struct p2m_domain *p2m
>       * promise to provide zero pages. So we scrub pages before using.
>       */
>      for ( i = 0; i < (1UL << order); i++ )
> -        clear_domain_page(mfn_add(page_to_mfn(page), i));
> +        clear_domain_page(mfn_add(mfn, i));

(For a future change,) this is double scrubbing in most cases.

~Andrew

>  
>      /* First, take all pages off the domain list */
>      lock_page_alloc(p2m);
> -    for ( i = 0; i < 1UL << order ; i++ )
> -    {
> -        p = page + i;
> -        page_list_del(p, &d->page_list);
> -    }
> -
> +    for ( i = 0; i < (1UL << order); i++ )
> +        page_list_del(page + i, &d->page_list);
>      unlock_page_alloc(p2m);
>  
>      /* Then add to the appropriate populate-on-demand list. */
>
>
Jan Beulich Dec. 2, 2021, 9:41 a.m. UTC | #2
On 01.12.2021 13:01, Andrew Cooper wrote:
> On 01/12/2021 10:53, Jan Beulich wrote:
>> --- a/xen/arch/x86/mm/p2m-pod.c
>> +++ b/xen/arch/x86/mm/p2m-pod.c
>> @@ -58,14 +58,10 @@ p2m_pod_cache_add(struct p2m_domain *p2m
>>                    unsigned int order)
>>  {
>>      unsigned long i;
>> -    struct page_info *p;
>>      struct domain *d = p2m->domain;
>> +    mfn_t mfn = page_to_mfn(page);
>>  
>>  #ifndef NDEBUG
>> -    mfn_t mfn;
>> -
>> -    mfn = page_to_mfn(page);
>> -
>>      /* Check to make sure this is a contiguous region */
>>      if ( mfn_x(mfn) & ((1UL << order) - 1) )
>>      {
>> @@ -74,17 +70,14 @@ p2m_pod_cache_add(struct p2m_domain *p2m
>>          return -1;
>>      }
>>  
>> -    for ( i = 0; i < 1UL << order ; i++)
>> +    for ( i = 0; i < (1UL << order); i++)
>>      {
>> -        struct domain * od;
>> +        const struct domain *od = page_get_owner(page + i);
>>  
>> -        p = mfn_to_page(mfn_add(mfn, i));
>> -        od = page_get_owner(p);
>>          if ( od != d )
>>          {
>> -            printk("%s: mfn %lx expected owner d%d, got owner d%d!\n",
>> -                   __func__, mfn_x(mfn), d->domain_id,
>> -                   od ? od->domain_id : -1);
>> +            printk("%s: mfn %lx owner: expected %pd, got %pd!\n",
>> +                   __func__, mfn_x(mfn) + i, d, od);
> 
> Take the opportunity to drop the superfluous punctuation?

Fine with me; that means just the exclamation mark though, unless you
tell me what else could sensibly be dropped. I'd certainly prefer to
keep both colons (the latter of which I'm actually adding here).
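
I.e. the printk() would then become:

    printk("%s: mfn %lx owner: expected %pd, got %pd\n",
           __func__, mfn_x(mfn) + i, d, od);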

> Looking through this code, no callers check for any errors, and the only
> failure paths are in a debug region.  The function really ought to
> become void, or at the very least, switch to -EINVAL to avoid aliasing
> -EPERM.

I'd be okay making this change (I may prefer to avoid EINVAL if I can
find a better match), but I wouldn't want to switch the function to
void - callers had better care about the return value.

> Furthermore, in all(?) cases that it fails, we'll leak the
> domain-allocated page, which at the very best means the VM is going to
> hit memory limit problems.  i.e. nothing good can come of it.
> 
> Both failures are internal memory handling errors in Xen, so the least
> invasive option is probably to switch to ASSERT() (for the alignment
> check), and ASSERT_UNREACHABLE() here.  That also addresses the issue
> that these printk()s aren't ratelimited and are used within several
> loops.

Doing this right here, OTOH, feels like going too far for a single
change. Not least because I would question the value of doing these
checks in debug builds only, or of tying them to NDEBUG rather than
CONFIG_DEBUG. After all, the alignment check could have triggered prior
to the XSA-388 fixes.
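
(Just to illustrate the distinction: today the checks sit under

    #ifndef NDEBUG
        /* checks */
    #endif

whereas tying them to the Kconfig debug setting would instead mean

    #ifdef CONFIG_DEBUG
        /* checks */
    #endif

i.e. control via the build configuration rather than the NDEBUG macro.)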

Jan

Patch

--- a/xen/arch/x86/mm/p2m-pod.c
+++ b/xen/arch/x86/mm/p2m-pod.c
@@ -58,14 +58,10 @@  p2m_pod_cache_add(struct p2m_domain *p2m
                   unsigned int order)
 {
     unsigned long i;
-    struct page_info *p;
     struct domain *d = p2m->domain;
+    mfn_t mfn = page_to_mfn(page);
 
 #ifndef NDEBUG
-    mfn_t mfn;
-
-    mfn = page_to_mfn(page);
-
     /* Check to make sure this is a contiguous region */
     if ( mfn_x(mfn) & ((1UL << order) - 1) )
     {
@@ -74,17 +70,14 @@  p2m_pod_cache_add(struct p2m_domain *p2m
         return -1;
     }
 
-    for ( i = 0; i < 1UL << order ; i++)
+    for ( i = 0; i < (1UL << order); i++)
     {
-        struct domain * od;
+        const struct domain *od = page_get_owner(page + i);
 
-        p = mfn_to_page(mfn_add(mfn, i));
-        od = page_get_owner(p);
         if ( od != d )
         {
-            printk("%s: mfn %lx expected owner d%d, got owner d%d!\n",
-                   __func__, mfn_x(mfn), d->domain_id,
-                   od ? od->domain_id : -1);
+            printk("%s: mfn %lx owner: expected %pd, got %pd!\n",
+                   __func__, mfn_x(mfn) + i, d, od);
             return -1;
         }
     }
@@ -98,16 +91,12 @@  p2m_pod_cache_add(struct p2m_domain *p2m
      * promise to provide zero pages. So we scrub pages before using.
      */
     for ( i = 0; i < (1UL << order); i++ )
-        clear_domain_page(mfn_add(page_to_mfn(page), i));
+        clear_domain_page(mfn_add(mfn, i));
 
     /* First, take all pages off the domain list */
     lock_page_alloc(p2m);
-    for ( i = 0; i < 1UL << order ; i++ )
-    {
-        p = page + i;
-        page_list_del(p, &d->page_list);
-    }
-
+    for ( i = 0; i < (1UL << order); i++ )
+        page_list_del(page + i, &d->page_list);
     unlock_page_alloc(p2m);
 
     /* Then add to the appropriate populate-on-demand list. */