diff mbox series

[RFC] lkdtm/usercopy: Add tests for other memory types

Message ID 20220512183613.1069697-1-keescook@chromium.org (mailing list archive)
State Superseded
Headers show
Series [RFC] lkdtm/usercopy: Add tests for other memory types | expand

Commit Message

Kees Cook May 12, 2022, 6:36 p.m. UTC
Add coverage for the recently added usercopy checks for vmap, kmap, and
folios.

Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Kees Cook <keescook@chromium.org>
---
Hi,

So, this kmap test fails the "good" copy step... Is copy_to_user()
limited somewhere else to not touch kmap'd pages? I can't find it...

Also, is my use of folios correct here?

-Kees
---
 drivers/misc/lkdtm/usercopy.c | 104 ++++++++++++++++++++++++++++++++++
 1 file changed, 104 insertions(+)

Comments

Matthew Wilcox May 12, 2022, 6:56 p.m. UTC | #1
On Thu, May 12, 2022 at 11:36:13AM -0700, Kees Cook wrote:
> +static void lkdtm_USERCOPY_FOLIO(void)
> +{
> +	struct folio *folio;
> +	void *addr;
> +
> +	/*
> +	 * FIXME: Folio checking currently misses 0-order allocations, so
> +	 * allocate and bump forward to the last page.
> +	 */
> +	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
> +	if (!folio) {
> +		pr_err("folio_alloc() failed!?\n");
> +		return;
> +	}
> +	addr = page_address(&folio->page);

Ideally, code shouldn't be using &folio->page.  If it is, we have a
gap in the folio API.  Fortunately, we have folio_address().

> +	if (addr) {
> +		do_usercopy_page_span("folio", addr + PAGE_SIZE);
> +	}
> +	folio_put(folio);
> +}

Other than that, this looks sane to me.
Kees Cook May 12, 2022, 7:12 p.m. UTC | #2
On Thu, May 12, 2022 at 07:56:13PM +0100, Matthew Wilcox wrote:
> On Thu, May 12, 2022 at 11:36:13AM -0700, Kees Cook wrote:
> > +static void lkdtm_USERCOPY_FOLIO(void)
> > +{
> > +	struct folio *folio;
> > +	void *addr;
> > +
> > +	/*
> > +	 * FIXME: Folio checking currently misses 0-order allocations, so
> > +	 * allocate and bump forward to the last page.
> > +	 */
> > +	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);
> > +	if (!folio) {
> > +		pr_err("folio_alloc() failed!?\n");
> > +		return;
> > +	}
> > +	addr = page_address(&folio->page);
> 
> Ideally, code shouldn't be using &folio->page.  If it is, we have a
> gap in the folio API.  Fortunately, we have folio_address().

Ah! Perfect, thanks. In trying to find the right alloc/free pair I
missed folio_address() :)
diff mbox series

Patch

diff --git a/drivers/misc/lkdtm/usercopy.c b/drivers/misc/lkdtm/usercopy.c
index 945806db2a13..d1c585bb7a8d 100644
--- a/drivers/misc/lkdtm/usercopy.c
+++ b/drivers/misc/lkdtm/usercopy.c
@@ -5,6 +5,7 @@ 
  */
 #include "lkdtm.h"
 #include <linux/slab.h>
+#include <linux/highmem.h>
 #include <linux/vmalloc.h>
 #include <linux/sched/task_stack.h>
 #include <linux/mman.h>
@@ -341,6 +342,106 @@  static void lkdtm_USERCOPY_KERNEL(void)
 	vm_munmap(user_addr, PAGE_SIZE);
 }
 
+/*
+ * This expects "kaddr" to point to a PAGE_SIZE allocation, which means
+ * a more complete test that would include copy_from_user() would risk
+ * memory corruption. Just test copy_to_user() here, as that exercises
+ * almost exactly the same code paths.
+ */
+static void do_usercopy_page_span(const char *name, void *kaddr)
+{
+	unsigned long uaddr;
+
+	uaddr = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE,
+			MAP_ANONYMOUS | MAP_PRIVATE, 0);
+	if (uaddr >= TASK_SIZE) { /* on failure vm_mmap() returns an errno value, which is >= TASK_SIZE */
+		pr_warn("Failed to allocate user memory\n");
+		return;
+	}
+
+	/* Initialize contents. */
+	memset(kaddr, 0xAA, PAGE_SIZE);
+
+	/* Bump the kaddr forward to detect a page-spanning overflow. */
+	kaddr += PAGE_SIZE / 2;
+
+	pr_info("attempting good copy_to_user() from kernel %s: %px\n",
+		name, kaddr);
+	if (copy_to_user((void __user *)uaddr, kaddr,
+			 unconst + (PAGE_SIZE / 2))) { /* "unconst" presumably zero at runtime, hiding the size from compile-time checks — verify against lkdtm.h */
+		pr_err("copy_to_user() failed unexpectedly?!\n");
+		goto free_user;
+	}
+
+	pr_info("attempting bad copy_to_user() from kernel %s: %px\n",
+		name, kaddr);
+	if (copy_to_user((void __user *)uaddr, kaddr,
+			 unconst + PAGE_SIZE)) { /* spans past the end of kaddr's page: should trip the hardened usercopy check */
+		pr_warn("Good, copy_to_user() failed, but lacked Oops(?!)\n");
+		goto free_user;
+	}
+
+	pr_err("FAIL: bad copy_to_user() not detected!\n");
+	pr_expected_config_param(CONFIG_HARDENED_USERCOPY, "hardened_usercopy");
+
+free_user:
+	vm_munmap(uaddr, PAGE_SIZE);
+}
+
+static void lkdtm_USERCOPY_VMALLOC(void)
+{
+	void *addr;
+
+	addr = vmalloc(PAGE_SIZE);	/* single page, so the page-span copy crosses the allocation's end */
+	if (!addr) {
+		pr_err("vmalloc() failed!?\n");
+		return;
+	}
+	do_usercopy_page_span("vmalloc", addr);	/* exercises the vmap-area usercopy bounds check */
+	vfree(addr);
+}
+
+static void lkdtm_USERCOPY_KMAP(void)
+{
+	struct page *page;
+	void *addr;
+
+	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);	/* 0-order: one page */
+	if (!page) {
+		pr_err("alloc_pages() failed!?\n");
+		return;
+	}
+	addr = kmap(page);	/* kmap, not kmap_atomic: copy_to_user() may fault/sleep, but kmap_atomic() disables pagefaults, making even the "good" copy fail */
+	if (addr) {
+		do_usercopy_page_span("kmap", addr);
+		kunmap(page);	/* kunmap() pairs with kmap() and takes the page, not the address */
+	} else {
+		pr_err("kmap() failed!?\n");
+	}
+	__free_pages(page, 0);
+}
+
+static void lkdtm_USERCOPY_FOLIO(void)
+{
+	struct folio *folio;
+	void *addr;
+
+	/*
+	 * FIXME: Folio checking currently misses 0-order allocations, so
+	 * allocate and bump forward to the last page.
+	 */
+	folio = folio_alloc(GFP_KERNEL | __GFP_ZERO, 1);	/* order-1: two pages */
+	if (!folio) {
+		pr_err("folio_alloc() failed!?\n");
+		return;
+	}
+	addr = folio_address(folio);	/* use the folio API rather than page_address(&folio->page) */
+	if (addr) {
+		do_usercopy_page_span("folio", addr + PAGE_SIZE);	/* last page of the folio */
+	}
+	folio_put(folio);
+}
+
 void __init lkdtm_usercopy_init(void)
 {
 	/* Prepare cache that lacks SLAB_USERCOPY flag. */
@@ -365,6 +466,9 @@  static struct crashtype crashtypes[] = {
 	CRASHTYPE(USERCOPY_STACK_FRAME_TO),
 	CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
 	CRASHTYPE(USERCOPY_STACK_BEYOND),
+	CRASHTYPE(USERCOPY_VMALLOC),
+	CRASHTYPE(USERCOPY_KMAP),
+	CRASHTYPE(USERCOPY_FOLIO),
 	CRASHTYPE(USERCOPY_KERNEL),
 };