[intel-sgx-kernel-dev,v9,RESEND] intel_sgx: migrate to radix tree for addressing enclave pages

Message ID 20161215205848.26207-1-jarkko.sakkinen@linux.intel.com (mailing list archive)
State New, archived

Commit Message

Jarkko Sakkinen Dec. 15, 2016, 8:58 p.m. UTC
A radix tree is a better fit than an RB tree for address-indexed lookups, so
replace the RB tree used for addressing enclave pages with a radix tree keyed
by page index (addr >> PAGE_SHIFT).

Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
---
Ugh, a couple of fixes to sgx_vma_do_fault were not squashed :( Sorry. Here's
a fixed version.
 drivers/platform/x86/intel_sgx.h       |  6 ++---
 drivers/platform/x86/intel_sgx_ioctl.c | 40 +++++++------------------------
 drivers/platform/x86/intel_sgx_util.c  | 44 +++++++---------------------------
 drivers/platform/x86/intel_sgx_vma.c   | 20 +++++++++-------
 4 files changed, 31 insertions(+), 79 deletions(-)
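
For reviewers who want the new access pattern at a glance, here is a minimal
kernel-context sketch of the radix tree calls the patch relies on. It is
illustrative only and not part of the patch: the example_* helpers and the
tree parameter are made up for the sketch, while INIT_RADIX_TREE(),
radix_tree_insert(), radix_tree_lookup(), radix_tree_delete() and
radix_tree_for_each_slot() are the stock <linux/radix-tree.h> API used in the
hunks below.

#include <linux/radix-tree.h>
#include "intel_sgx.h"	/* struct sgx_encl_page */

/* Pages are keyed by page index (addr >> PAGE_SHIFT), as in the hunks below. */

static int example_track_page(struct radix_tree_root *tree,
			      struct sgx_encl_page *page)
{
	/*
	 * The root is set up once with INIT_RADIX_TREE(tree, GFP_KERNEL).
	 * Returns -EEXIST if the index is already occupied.
	 */
	return radix_tree_insert(tree, page->addr >> PAGE_SHIFT, page);
}

static struct sgx_encl_page *example_find_page(struct radix_tree_root *tree,
					       unsigned long addr)
{
	/* NULL when nothing is tracked at that index. */
	return radix_tree_lookup(tree, addr >> PAGE_SHIFT);
}

static void example_release_all(struct radix_tree_root *tree)
{
	struct radix_tree_iter iter;
	void **slot;

	/* Walk every populated slot and drop it, as sgx_encl_release() now does. */
	radix_tree_for_each_slot(slot, tree, &iter, 0)
		radix_tree_delete(tree, iter.index);
}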

Patch

diff --git a/drivers/platform/x86/intel_sgx.h b/drivers/platform/x86/intel_sgx.h
index c8b65fe..ed9e8e6 100644
--- a/drivers/platform/x86/intel_sgx.h
+++ b/drivers/platform/x86/intel_sgx.h
@@ -68,6 +68,7 @@ 
 #include <linux/sched.h>
 #include <linux/workqueue.h>
 #include <linux/mmu_notifier.h>
+#include <linux/radix-tree.h>
 
 #define SGX_EINIT_SPIN_COUNT	20
 #define SGX_EINIT_SLEEP_COUNT	50
@@ -115,7 +116,6 @@  struct sgx_encl_page {
 	struct sgx_va_page *va_page;
 	unsigned int va_offset;
 	struct sgx_pcmd pcmd;
-	struct rb_node node;
 };
 
 struct sgx_tgid_ctx {
@@ -146,7 +146,7 @@  struct sgx_encl {
 	unsigned long base;
 	unsigned long size;
 	struct list_head va_pages;
-	struct rb_root encl_rb;
+	struct radix_tree_root page_tree;
 	struct list_head add_page_reqs;
 	struct work_struct add_page_work;
 	struct sgx_encl_page secs_page;
@@ -211,8 +211,6 @@  void sgx_unpin_mm(struct sgx_encl *encl);
 void sgx_invalidate(struct sgx_encl *encl);
 int sgx_find_encl(struct mm_struct *mm, unsigned long addr,
 		  struct vm_area_struct **vma);
-struct sgx_encl_page *sgx_encl_find_page(struct sgx_encl *encl,
-					 unsigned long addr);
 void sgx_encl_release(struct kref *ref);
 void sgx_tgid_ctx_release(struct kref *ref);
 
diff --git a/drivers/platform/x86/intel_sgx_ioctl.c b/drivers/platform/x86/intel_sgx_ioctl.c
index 8543373..3a4a8fa 100644
--- a/drivers/platform/x86/intel_sgx_ioctl.c
+++ b/drivers/platform/x86/intel_sgx_ioctl.c
@@ -138,33 +138,6 @@  void sgx_tgid_ctx_release(struct kref *ref)
 	kfree(pe);
 }
 
-static int encl_rb_insert(struct rb_root *root,
-			  struct sgx_encl_page *data)
-{
-	struct rb_node **new = &root->rb_node;
-	struct rb_node *parent = NULL;
-
-	/* Figure out where to put new node */
-	while (*new) {
-		struct sgx_encl_page *this =
-			container_of(*new, struct sgx_encl_page, node);
-
-		parent = *new;
-		if (data->addr < this->addr)
-			new = &((*new)->rb_left);
-		else if (data->addr > this->addr)
-			new = &((*new)->rb_right);
-		else
-			return -EFAULT;
-	}
-
-	/* Add new node and rebalance tree. */
-	rb_link_node(&data->node, parent, new);
-	rb_insert_color(&data->node, root);
-
-	return 0;
-}
-
 static int sgx_find_and_get_encl(unsigned long addr, struct sgx_encl **encl)
 {
 	struct mm_struct *mm = current->mm;
@@ -538,6 +511,7 @@  static long sgx_ioc_enclave_create(struct file *filep, unsigned int cmd,
 	kref_init(&encl->refcount);
 	INIT_LIST_HEAD(&encl->add_page_reqs);
 	INIT_LIST_HEAD(&encl->va_pages);
+	INIT_RADIX_TREE(&encl->page_tree, GFP_KERNEL);
 	INIT_LIST_HEAD(&encl->load_list);
 	INIT_LIST_HEAD(&encl->encl_list);
 	mutex_init(&encl->lock);
@@ -713,7 +687,7 @@  static int __encl_add_page(struct sgx_encl *encl,
 		goto out;
 	}
 
-	if (sgx_encl_find_page(encl, addp->addr)) {
+	if (radix_tree_lookup(&encl->page_tree, addp->addr >> PAGE_SHIFT)) {
 		ret = -EEXIST;
 		goto out;
 	}
@@ -730,6 +704,13 @@  static int __encl_add_page(struct sgx_encl *encl,
 		goto out;
 	}
 
+	ret = radix_tree_insert(&encl->page_tree, encl_page->addr >> PAGE_SHIFT,
+				encl_page);
+	if (ret) {
+		sgx_put_backing(backing, false /* write */);
+		goto out;
+	}
+
 	user_vaddr = kmap(backing);
 	tmp_vaddr = kmap(tmp_page);
 	memcpy(user_vaddr, tmp_vaddr, PAGE_SIZE);
@@ -757,9 +738,6 @@  static int __encl_add_page(struct sgx_encl *encl,
 		kfree(req);
 		sgx_free_va_slot(encl_page->va_page,
 				 encl_page->va_offset);
-	} else {
-		ret = encl_rb_insert(&encl->encl_rb, encl_page);
-		WARN_ON(ret);
 	}
 
 	mutex_unlock(&encl->lock);
diff --git a/drivers/platform/x86/intel_sgx_util.c b/drivers/platform/x86/intel_sgx_util.c
index f6f7dde0..2c390c5 100644
--- a/drivers/platform/x86/intel_sgx_util.c
+++ b/drivers/platform/x86/intel_sgx_util.c
@@ -120,13 +120,9 @@  struct vm_area_struct *sgx_find_vma(struct sgx_encl *encl, unsigned long addr)
 void sgx_zap_tcs_ptes(struct sgx_encl *encl, struct vm_area_struct *vma)
 {
 	struct sgx_encl_page *entry;
-	struct rb_node *rb;
 
-	rb = rb_first(&encl->encl_rb);
-	while (rb) {
-		entry = container_of(rb, struct sgx_encl_page, node);
-		rb = rb_next(rb);
-		if (entry->epc_page && (entry->flags & SGX_ENCL_PAGE_TCS) &&
+	list_for_each_entry(entry, &encl->load_list, load_list) {
+		if ((entry->flags & SGX_ENCL_PAGE_TCS) &&
 		    entry->addr >= vma->vm_start &&
 		    entry->addr < vma->vm_end)
 			zap_vma_ptes(vma, entry->addr, PAGE_SIZE);
@@ -203,55 +199,31 @@  int sgx_find_encl(struct mm_struct *mm, unsigned long addr,
 	return 0;
 }
 
-struct sgx_encl_page *sgx_encl_find_page(struct sgx_encl *encl,
-					 unsigned long addr)
-{
-	struct rb_node *node = encl->encl_rb.rb_node;
-
-	while (node) {
-		struct sgx_encl_page *data =
-			container_of(node, struct sgx_encl_page, node);
-
-		if (data->addr > addr)
-			node = node->rb_left;
-		else if (data->addr < addr)
-			node = node->rb_right;
-		else
-			return data;
-	}
-
-	return NULL;
-}
-
 void sgx_encl_release(struct kref *ref)
 {
-	struct rb_node *rb1, *rb2;
 	struct sgx_encl_page *entry;
 	struct sgx_va_page *va_page;
-	struct sgx_encl *encl =
-		container_of(ref, struct sgx_encl, refcount);
+	struct sgx_encl *encl = container_of(ref, struct sgx_encl, refcount);
+	struct radix_tree_iter iter;
+	void **slot;
 
 	mutex_lock(&sgx_tgid_ctx_mutex);
 	if (!list_empty(&encl->encl_list))
 		list_del(&encl->encl_list);
-
 	mutex_unlock(&sgx_tgid_ctx_mutex);
 
 	if (encl->mmu_notifier.ops)
 		mmu_notifier_unregister_no_release(&encl->mmu_notifier,
 						   encl->mm);
 
-	rb1 = rb_first(&encl->encl_rb);
-	while (rb1) {
-		entry = container_of(rb1, struct sgx_encl_page, node);
-		rb2 = rb_next(rb1);
-		rb_erase(rb1, &encl->encl_rb);
+	radix_tree_for_each_slot(slot, &encl->page_tree, &iter, 0) {
+		entry = *slot;
 		if (entry->epc_page) {
 			list_del(&entry->load_list);
 			sgx_free_page(entry->epc_page, encl, 0);
 		}
+		radix_tree_delete(&encl->page_tree, entry->addr >> PAGE_SHIFT);
 		kfree(entry);
-		rb1 = rb2;
 	}
 
 	while (!list_empty(&encl->va_pages)) {
diff --git a/drivers/platform/x86/intel_sgx_vma.c b/drivers/platform/x86/intel_sgx_vma.c
index d588932..1ff55c1 100644
--- a/drivers/platform/x86/intel_sgx_vma.c
+++ b/drivers/platform/x86/intel_sgx_vma.c
@@ -169,16 +169,20 @@  static struct sgx_encl_page *sgx_vma_do_fault(struct vm_area_struct *vma,
 	if (!encl)
 		return ERR_PTR(-EFAULT);
 
-	entry = sgx_encl_find_page(encl, addr);
-	if (!entry)
-		return ERR_PTR(-EFAULT);
+	mutex_lock(&encl->lock);
 
-	epc_page = sgx_alloc_page(encl->tgid_ctx, SGX_ALLOC_ATOMIC);
-	if (IS_ERR(epc_page))
-		/* reinterpret the type as we return an error */
-		return (struct sgx_encl_page *)epc_page;
+	entry = radix_tree_lookup(&encl->page_tree, addr >> PAGE_SHIFT);
+	if (!entry) {
+		entry = ERR_PTR(-EFAULT);
+		goto out;
+	}
 
-	mutex_lock(&encl->lock);
+	epc_page = sgx_alloc_page(encl->tgid_ctx, SGX_ALLOC_ATOMIC);
+	if (IS_ERR(epc_page)) {
+		entry = (struct sgx_encl_page *)epc_page;
+		epc_page = NULL;
+		goto out;
+	}
 
 	if (encl->flags & SGX_ENCL_DEAD) {
 		entry = ERR_PTR(-EFAULT);