@@ -195,8 +195,6 @@ void sgx_insert_pte(struct sgx_encl *encl,
struct vm_area_struct *vma);
int sgx_eremove(struct sgx_epc_page *epc_page);
struct vm_area_struct *sgx_find_vma(struct sgx_encl *encl, unsigned long addr);
-void sgx_zap_tcs_ptes(struct sgx_encl *encl,
- struct vm_area_struct *vma);
void sgx_invalidate(struct sgx_encl *encl, bool flush_cpus);
void sgx_flush_cpus(struct sgx_encl *encl);
int sgx_find_encl(struct mm_struct *mm, unsigned long addr,
@@ -108,33 +108,26 @@ struct vm_area_struct *sgx_find_vma(struct sgx_encl *encl, unsigned long addr)
return NULL;
}
-void sgx_zap_tcs_ptes(struct sgx_encl *encl, struct vm_area_struct *vma)
+static void sgx_zap_tcs_ptes(struct sgx_encl *encl)
{
- struct sgx_epc_page *tmp;
+ struct vm_area_struct *vma;
struct sgx_encl_page *entry;
+ struct radix_tree_iter iter;
+ void **slot;
- list_for_each_entry(tmp, &encl->load_list, list) {
- entry = tmp->encl_page;
- if ((entry->flags & SGX_ENCL_PAGE_TCS) &&
- entry->addr >= vma->vm_start &&
- entry->addr < vma->vm_end)
- zap_vma_ptes(vma, entry->addr, PAGE_SIZE);
+ radix_tree_for_each_slot(slot, &encl->page_tree, &iter, 0) {
+ entry = *slot;
+ if (entry->epc_page && (entry->flags & SGX_ENCL_PAGE_TCS)) {
+ vma = sgx_find_vma(encl, entry->addr);
+ if (vma)
+ zap_vma_ptes(vma, entry->addr, PAGE_SIZE);
+ }
}
}
void sgx_invalidate(struct sgx_encl *encl, bool flush_cpus)
{
- struct vm_area_struct *vma;
- unsigned long addr;
-
- for (addr = encl->base; addr < (encl->base + encl->size);
- addr = vma->vm_end) {
- vma = sgx_find_vma(encl, addr);
- if (vma)
- sgx_zap_tcs_ptes(encl, vma);
- else
- break;
- }
+ sgx_zap_tcs_ptes(encl);
encl->flags |= SGX_ENCL_DEAD;
In sgx_zap_tcs_ptes, iterate over an enclave's pages using its radix
tree, i.e. page_tree, instead of its linked list, i.e. load_list. This
removes a dependency on sgx_encl's load_list, which will allow for the
removal of load_list.

Walk the pages to look for loaded TCS and only conditionally search for
a TCS page's VMA when necessary. Walking the pages before VMAs improves
the performance of sgx_zap_tcs_ptes from O(m * log(m) * n) to
O(log(m) * n), where m is the number of VMAs and n is the number of
pages. This mitigates any potential performance regression that would
occur due to walking all of an enclave's pages as opposed to only its
loaded pages.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 drivers/platform/x86/intel_sgx/sgx.h      |  2 --
 drivers/platform/x86/intel_sgx/sgx_util.c | 31 ++++++++++++-------------------
 2 files changed, 12 insertions(+), 21 deletions(-)