[14/16] paged_pages field is MEM_PAGING-only

Message ID ab136038-0242-086c-9e67-02c47e1db3e0@suse.com (mailing list archive)
State New, archived
Series x86/mm: large parts of P2M code and struct p2m_domain are HVM-only

Commit Message

Jan Beulich July 5, 2021, 4:14 p.m. UTC
Conditionalize it and its uses accordingly.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
I was on the verge of introducing a helper for
atomic_read(&d->paged_pages), but decided against it because
dump_domains() would not be able to use it sensibly (I really want to
omit the output field altogether there when !MEM_PAGING).
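
[For illustration, a minimal sketch of what such a helper might have
looked like; it is hypothetical and not part of this patch, and the
name domain_paged_pages() is invented here. It shows why dump_domains()
could not use it sensibly: a value-returning helper can only yield 0
when !MEM_PAGING, whereas dump_domains() wants to omit the output field
entirely.]

    /* Hypothetical helper, for illustration only -- not in the patch. */
    static inline int domain_paged_pages(const struct domain *d)
    {
    #ifdef CONFIG_MEM_PAGING
        return atomic_read(&d->paged_pages);
    #else
        return 0;
    #endif
    }

[getdomaininfo() could then drop its #ifdef/#else, since it assigns 0 in
the !MEM_PAGING case anyway, but the keyhandler output would still need
its own conditional.]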

Comments

Tamas K Lengyel July 6, 2021, 12:44 p.m. UTC | #1
On Mon, Jul 5, 2021 at 12:14 PM Jan Beulich <jbeulich@suse.com> wrote:
>
> Conditionalize it and its uses accordingly.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

For the mem_sharing bits:
Acked-by: Tamas K Lengyel <tamas@tklengyel.com>

The rest also looks fine to me, so you can consider having an R-b for
those bits as well.
George Dunlap Feb. 14, 2022, 3:38 p.m. UTC | #2
> On Jul 5, 2021, at 5:14 PM, Jan Beulich <JBeulich@suse.com> wrote:
> 
> Conditionalize it and its uses accordingly.
> 
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: George Dunlap <george.dunlap@citrix.com>
diff mbox series

Patch

--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1213,6 +1213,7 @@  int add_to_physmap(struct domain *sd, un
     }
     else
     {
+#ifdef CONFIG_MEM_PAGING
         /*
          * There is a chance we're plugging a hole where a paged out
          * page was.
@@ -1238,6 +1239,7 @@  int add_to_physmap(struct domain *sd, un
                 put_page(cpage);
             }
         }
+#endif
     }
 
     atomic_inc(&nr_saved_mfns);
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -666,11 +666,13 @@  p2m_add_page(struct domain *d, gfn_t gfn
             /* Count how many PoD entries we'll be replacing if successful */
             pod_count++;
         }
+#ifdef CONFIG_MEM_PAGING
         else if ( p2m_is_paging(ot) && (ot != p2m_ram_paging_out) )
         {
             /* We're plugging a hole in the physmap where a paged out page was */
             atomic_dec(&d->paged_pages);
         }
+#endif
     }
 
     /* Then, look for m->p mappings for this range and deal with them */
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -114,7 +114,11 @@  void getdomaininfo(struct domain *d, str
 #else
     info->shr_pages         = 0;
 #endif
+#ifdef CONFIG_MEM_PAGING
     info->paged_pages       = atomic_read(&d->paged_pages);
+#else
+    info->paged_pages       = 0;
+#endif
     info->shared_info_frame =
         gfn_x(mfn_to_gfn(d, _mfn(virt_to_mfn(d->shared_info))));
     BUG_ON(SHARED_M2P(info->shared_info_frame));
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -278,14 +278,18 @@  static void dump_domains(unsigned char k
 #ifdef CONFIG_MEM_SHARING
                " shared_pages=%u"
 #endif
+#ifdef CONFIG_MEM_PAGING
                " paged_pages=%u"
+#endif
                " dirty_cpus={%*pbl} max_pages=%u\n",
                domain_tot_pages(d), d->xenheap_pages,
 #ifdef CONFIG_MEM_SHARING
                atomic_read(&d->shr_pages),
 #endif
-               atomic_read(&d->paged_pages), CPUMASK_PR(d->dirty_cpumask),
-               d->max_pages);
+#ifdef CONFIG_MEM_PAGING
+               atomic_read(&d->paged_pages),
+#endif
+               CPUMASK_PR(d->dirty_cpumask), d->max_pages);
         printk("    handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
                "%02x%02x-%02x%02x%02x%02x%02x%02x vm_assist=%08lx\n",
                d->handle[ 0], d->handle[ 1], d->handle[ 2], d->handle[ 3],
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -390,7 +390,9 @@  struct domain
     atomic_t         shr_pages;         /* shared pages */
 #endif
 
+#ifdef CONFIG_MEM_PAGING
     atomic_t         paged_pages;       /* paged-out pages */
+#endif
 
     /* Scheduling. */
     void            *sched_priv;    /* scheduler-specific data */