[13/16] shr_pages field is MEM_SHARING-only

Message ID ec21ca25-73bc-ddb7-8674-b7a59adb441c@suse.com
State New
Series x86/mm: large parts of P2M code and struct p2m_domain are HVM-only

Commit Message

Jan Beulich July 5, 2021, 4:13 p.m. UTC
Conditionalize it and its uses accordingly. The main goal, though, is to
demonstrate that x86's p2m_teardown() is now empty when !HVM, which in
particular means the last remaining use of p2m_lock() in this case goes
away.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
I was on the edge of introducing a helper for atomic_read(&d->shr_pages),
but decided against it because dump_domains() would not be able to use it
sensibly (I really want to omit the output field altogether there when
!MEM_SHARING).
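
For illustration only (not part of the patch), the rejected helper might
have looked like the sketch below; the name domain_shr_pages() is made up
here. It would hide the #ifdef from callers such as getdomaininfo(), but
dump_domains() wants to drop the field from its output entirely rather
than print a constant 0, so it couldn't sensibly be switched over:

/* Hypothetical accessor (needs xen/sched.h for struct domain). */
static inline unsigned int domain_shr_pages(const struct domain *d)
{
#ifdef CONFIG_MEM_SHARING
    return atomic_read(&d->shr_pages);
#else
    return 0;
#endif
}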

Comments

Tamas K Lengyel July 6, 2021, 12:42 p.m. UTC | #1
On Mon, Jul 5, 2021 at 12:13 PM Jan Beulich <jbeulich@suse.com> wrote:
>
> Conditionalize it and its uses accordingly. The main goal, though, is to
> demonstrate that x86's p2m_teardown() is now empty when !HVM, which in
> particular means the last remaining use of p2m_lock() in this case goes
> away.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>

Reviewed-by: Tamas K Lengyel <tamas@tklengyel.com>

Patch

--- a/xen/arch/x86/mm/p2m-basic.c
+++ b/xen/arch/x86/mm/p2m-basic.c
@@ -159,7 +159,6 @@ void p2m_teardown(struct p2m_domain *p2m
 {
 #ifdef CONFIG_HVM
     struct page_info *pg;
-#endif
     struct domain *d;
 
     if ( !p2m )
@@ -169,16 +168,17 @@ void p2m_teardown(struct p2m_domain *p2m
 
     p2m_lock(p2m);
 
+#ifdef CONFIG_MEM_SHARING
     ASSERT(atomic_read(&d->shr_pages) == 0);
+#endif
 
-#ifdef CONFIG_HVM
     p2m->phys_table = pagetable_null();
 
     while ( (pg = page_list_remove_head(&p2m->pages)) )
         d->arch.paging.free_page(d, pg);
-#endif
 
     p2m_unlock(p2m);
+#endif
 }
 
 void p2m_final_teardown(struct domain *d)
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -109,7 +109,11 @@ void getdomaininfo(struct domain *d, str
     info->tot_pages         = domain_tot_pages(d);
     info->max_pages         = d->max_pages;
     info->outstanding_pages = d->outstanding_pages;
+#ifdef CONFIG_MEM_SHARING
     info->shr_pages         = atomic_read(&d->shr_pages);
+#else
+    info->shr_pages         = 0;
+#endif
     info->paged_pages       = atomic_read(&d->paged_pages);
     info->shared_info_frame =
         gfn_x(mfn_to_gfn(d, _mfn(virt_to_mfn(d->shared_info))));
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -274,9 +274,16 @@ static void dump_domains(unsigned char k
         printk("    refcnt=%d dying=%d pause_count=%d\n",
                atomic_read(&d->refcnt), d->is_dying,
                atomic_read(&d->pause_count));
-        printk("    nr_pages=%d xenheap_pages=%d shared_pages=%u paged_pages=%u "
-               "dirty_cpus={%*pbl} max_pages=%u\n",
-               domain_tot_pages(d), d->xenheap_pages, atomic_read(&d->shr_pages),
+        printk("    nr_pages=%u xenheap_pages=%u"
+#ifdef CONFIG_MEM_SHARING
+               " shared_pages=%u"
+#endif
+               " paged_pages=%u"
+               " dirty_cpus={%*pbl} max_pages=%u\n",
+               domain_tot_pages(d), d->xenheap_pages,
+#ifdef CONFIG_MEM_SHARING
+               atomic_read(&d->shr_pages),
+#endif
                atomic_read(&d->paged_pages), CPUMASK_PR(d->dirty_cpumask),
                d->max_pages);
         printk("    handle=%02x%02x%02x%02x-%02x%02x-%02x%02x-"
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -385,7 +385,11 @@ struct domain
     unsigned int     outstanding_pages; /* pages claimed but not possessed */
     unsigned int     max_pages;         /* maximum value for domain_tot_pages() */
     unsigned int     extra_pages;       /* pages not included in domain_tot_pages() */
+
+#ifdef CONFIG_MEM_SHARING
     atomic_t         shr_pages;         /* shared pages */
+#endif
+
     atomic_t         paged_pages;       /* paged-out pages */
 
     /* Scheduling. */
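
For reference (a sketch, not part of the patch itself): with the #endif
moved past p2m_unlock(), a !HVM build's p2m_teardown() reduces to roughly
the following once the preprocessor has run, which is what makes the last
p2m_lock() use in that configuration go away:

void p2m_teardown(struct p2m_domain *p2m)
{
    /* Empty when !CONFIG_HVM: no pages to free, no lock to take. */
}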