
[v4,8/8] mm: Make sure pages are scrubbed

Message ID 1495209040-11101-9-git-send-email-boris.ostrovsky@oracle.com (mailing list archive)
State New, archived

Commit Message

Boris Ostrovsky May 19, 2017, 3:50 p.m. UTC
Add a debug Kconfig option that will make the page allocator verify
that pages that were supposed to be scrubbed are, in fact, clean.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
Changes in v4:
* Don't (debug-)scrub (and don't check for poison) before bootscrub completes
* Adjust scrub pattern

 xen/Kconfig.debug       |  7 ++++++
 xen/common/page_alloc.c | 59 ++++++++++++++++++++++++++++++++++++++++++++++++-
 2 files changed, 65 insertions(+), 1 deletion(-)
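
In outline: when a dirty page is freed it is poisoned with the inverse of a
known pattern and marked PGC_need_scrub; once boot scrubbing has completed,
every page handed out without MEMF_no_scrub is asserted to contain the scrub
pattern. A condensed sketch of the two sides, using the helper names
introduced by the patch below (not a literal excerpt):

    /* Free path: mark the page as needing a scrub and poison it. */
    pg[i].count_info |= PGC_need_scrub;
    poison_one_page(&pg[i]);         /* writes ~SCRUB_PATTERN */

    /* Alloc path: pages not requested with MEMF_no_scrub must hold the
     * scrub pattern by the time they are handed out. */
    if ( !(memflags & MEMF_no_scrub) )
        check_one_page(&pg[i]);      /* ASSERTs each word == SCRUB_PATTERN */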

Comments

Jan Beulich June 12, 2017, 8:43 a.m. UTC | #1
>>> On 19.05.17 at 17:50, <boris.ostrovsky@oracle.com> wrote:
> --- a/xen/Kconfig.debug
> +++ b/xen/Kconfig.debug
> @@ -114,6 +114,13 @@ config DEVICE_TREE_DEBUG
>  	  logged in the Xen ring buffer.
>  	  If unsure, say N here.
>  
> +config SCRUB_DEBUG
> +    bool "Page scrubbing test"
> +    default DEBUG
> +    ---help---

Indentation.

> --- a/xen/common/page_alloc.c
> +++ b/xen/common/page_alloc.c
> @@ -170,6 +170,10 @@ boolean_param("bootscrub", opt_bootscrub);
>  static unsigned long __initdata opt_bootscrub_chunk = MB(128);
>  size_param("bootscrub_chunk", opt_bootscrub_chunk);
>  
> +#ifdef CONFIG_SCRUB_DEBUG
> +static bool boot_scrub_done;

It's not all that important as it's debugging code only, but -
__read_mostly?
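
That is, something along the lines of (a sketch of the suggested annotation,
not part of the posted patch):

    static bool __read_mostly boot_scrub_done;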

> +static void check_one_page(struct page_info *pg)
> +{
> +#ifdef CONFIG_SCRUB_DEBUG
> +    mfn_t mfn = _mfn(page_to_mfn(pg));
> +    uint64_t *ptr;

const

> +    unsigned i;

unsigned int

> @@ -2269,7 +2325,8 @@ void scrub_one_page(struct page_info *pg)
>  
>  #ifndef NDEBUG
>      /* Avoid callers relying on allocations returning zeroed pages. */
> -    unmap_domain_page(memset(__map_domain_page(pg), 0xc2, PAGE_SIZE));
> +    unmap_domain_page(memset(__map_domain_page(pg),
> +                             SCRUB_BYTE_PATTERN, PAGE_SIZE));
>  #else
>      /* For a production build, clear_page() is the fastest way to scrub. */
>      clear_domain_page(_mfn(page_to_mfn(pg)));

With EXPERT=y, SCRUB_DEBUG can also be selected for non-debug
builds, in which case pages will be zeroed but check_one_page() will
nevertheless check for the non-zero pattern. IOW I think the
pattern needs to be zero for non-debug builds, at which point the
#if here would probably better check whether the pattern is
non-zero.

Jan
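
For illustration, a possible shape of the change Jan suggests, keyed off the
pattern rather than NDEBUG (a sketch only, not part of the posted patch or of
the reply; the scrub_one_page() body is as in the patch below):

    /* Zero pattern for non-debug builds, which scrub via clear_domain_page(),
     * so check_one_page() compares against what was actually written. */
    #ifndef NDEBUG
    #define SCRUB_PATTERN        0xc2c2c2c2c2c2c2c2ULL
    #else
    #define SCRUB_PATTERN        0ULL
    #endif
    #define SCRUB_BYTE_PATTERN   (SCRUB_PATTERN & 0xff)

        /* ... in scrub_one_page(), test the pattern instead of NDEBUG ... */
    #if SCRUB_BYTE_PATTERN
        /* Avoid callers relying on allocations returning zeroed pages. */
        unmap_domain_page(memset(__map_domain_page(pg),
                                 SCRUB_BYTE_PATTERN, PAGE_SIZE));
    #else
        /* For a production build, clear_page() is the fastest way to scrub. */
        clear_domain_page(_mfn(page_to_mfn(pg)));
    #endif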

Patch

diff --git a/xen/Kconfig.debug b/xen/Kconfig.debug
index 689f297..adc4162 100644
--- a/xen/Kconfig.debug
+++ b/xen/Kconfig.debug
@@ -114,6 +114,13 @@  config DEVICE_TREE_DEBUG
 	  logged in the Xen ring buffer.
 	  If unsure, say N here.
 
+config SCRUB_DEBUG
+    bool "Page scrubbing test"
+    default DEBUG
+    ---help---
+	  Verify that pages that need to be scrubbed before being allocated to
+	  a guest are indeed scrubbed.
+
 endif # DEBUG || EXPERT
 
 endmenu
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index e744d81..c1ac26d 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -170,6 +170,10 @@  boolean_param("bootscrub", opt_bootscrub);
 static unsigned long __initdata opt_bootscrub_chunk = MB(128);
 size_param("bootscrub_chunk", opt_bootscrub_chunk);
 
+#ifdef CONFIG_SCRUB_DEBUG
+static bool boot_scrub_done;
+#endif
+
 /*
  * Bit width of the DMA heap -- used to override NUMA-node-first.
  * allocation strategy, which can otherwise exhaust low memory.
@@ -694,6 +698,39 @@  static void page_list_add_scrub(struct page_info *pg, unsigned int node,
         page_list_add(pg, &heap(node, zone, order));
 }
 
+/* SCRUB_PATTERN needs to be a repeating series of bytes. */
+#define SCRUB_PATTERN        0xc2c2c2c2c2c2c2c2ULL
+#define SCRUB_BYTE_PATTERN   (SCRUB_PATTERN & 0xff)
+
+static void poison_one_page(struct page_info *pg)
+{
+#ifdef CONFIG_SCRUB_DEBUG
+    mfn_t mfn = _mfn(page_to_mfn(pg));
+    uint64_t *ptr;
+
+    ptr = map_domain_page(mfn);
+    *ptr = ~SCRUB_PATTERN;
+    unmap_domain_page(ptr);
+#endif
+}
+
+static void check_one_page(struct page_info *pg)
+{
+#ifdef CONFIG_SCRUB_DEBUG
+    mfn_t mfn = _mfn(page_to_mfn(pg));
+    uint64_t *ptr;
+    unsigned i;
+
+    if ( !boot_scrub_done )
+        return;
+
+    ptr = map_domain_page(mfn);
+    for ( i = 0; i < PAGE_SIZE / sizeof (*ptr); i++ )
+        ASSERT(ptr[i] == SCRUB_PATTERN);
+    unmap_domain_page(ptr);
+#endif
+}
+
 static void check_and_stop_scrub(struct page_info *head)
 {
     if ( head->u.free.scrub_state & PAGE_SCRUBBING )
@@ -919,6 +956,9 @@  static struct page_info *alloc_heap_pages(
          * guest can control its own visibility of/through the cache.
          */
         flush_page_to_ram(page_to_mfn(&pg[i]));
+
+        if ( !(memflags & MEMF_no_scrub) )
+            check_one_page(&pg[i]); 
     }
 
     spin_unlock(&heap_lock);
@@ -1270,7 +1310,10 @@  static void free_heap_pages(
         set_gpfn_from_mfn(mfn + i, INVALID_M2P_ENTRY);
 
         if ( need_scrub )
+        {
             pg[i].count_info |= PGC_need_scrub;
+            poison_one_page(&pg[i]);
+        }
     }
 
     avail[node][zone] += 1 << order;
@@ -1633,7 +1676,12 @@  static void init_heap_pages(
             nr_pages -= n;
         }
 
+#ifndef CONFIG_SCRUB_DEBUG
         free_heap_pages(pg + i, 0, false);
+#else
+        free_heap_pages(pg + i, 0, boot_scrub_done);
+#endif
+	
     }
 }
 
@@ -1899,6 +1947,10 @@  void __init scrub_heap_pages(void)
 
     printk("done.\n");
 
+#ifdef CONFIG_SCRUB_DEBUG
+    boot_scrub_done = true;
+#endif
+
     /* Now that the heap is initialized, run checks and set bounds
      * for the low mem virq algorithm. */
     setup_low_mem_virq();
@@ -2172,12 +2224,16 @@  void free_domheap_pages(struct page_info *pg, unsigned int order)
 
             spin_unlock_recursive(&d->page_alloc_lock);
 
+#ifndef CONFIG_SCRUB_DEBUG
             /*
              * Normally we expect a domain to clear pages before freeing them,
              * if it cares about the secrecy of their contents. However, after
              * a domain has died we assume responsibility for erasure.
              */
             scrub = !!d->is_dying;
+#else
+            scrub = true;
+#endif
         }
         else
         {
@@ -2269,7 +2325,8 @@  void scrub_one_page(struct page_info *pg)
 
 #ifndef NDEBUG
     /* Avoid callers relying on allocations returning zeroed pages. */
-    unmap_domain_page(memset(__map_domain_page(pg), 0xc2, PAGE_SIZE));
+    unmap_domain_page(memset(__map_domain_page(pg),
+                             SCRUB_BYTE_PATTERN, PAGE_SIZE));
 #else
     /* For a production build, clear_page() is the fastest way to scrub. */
     clear_domain_page(_mfn(page_to_mfn(pg)));