@@ -247,7 +247,8 @@ static int sgx_validate_secs(const struct sgx_secs *secs,
 
static struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
unsigned long addr,
- unsigned long prot)
+ unsigned long prot,
+ u64 page_type)
{
struct sgx_encl_page *encl_page;
int ret;
@@ -258,6 +259,8 @@ static struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
if (!encl_page)
return ERR_PTR(-ENOMEM);
encl_page->desc = addr;
+ if (page_type == SGX_SECINFO_TCS)
+ encl_page->desc |= SGX_ENCL_PAGE_TCS;
encl_page->encl = encl;
encl_page->vm_prot_bits = calc_vm_prot_bits(prot, 0);
ret = radix_tree_insert(&encl->page_tree, PFN_DOWN(encl_page->desc),
@@ -475,7 +478,6 @@ static int __sgx_encl_add_page(struct sgx_encl *encl,
unsigned int mrmask)
{
unsigned long page_index = sgx_encl_get_index(encl, encl_page);
- u64 page_type = secinfo->flags & SGX_SECINFO_PAGE_TYPE_MASK;
struct sgx_add_page_req *req = NULL;
struct page *backing;
void *backing_ptr;
@@ -494,8 +496,7 @@ static int __sgx_encl_add_page(struct sgx_encl *encl,
backing_ptr = kmap(backing);
memcpy(backing_ptr, data, PAGE_SIZE);
kunmap(backing);
- if (page_type == SGX_SECINFO_TCS)
- encl_page->desc |= SGX_ENCL_PAGE_TCS;
+
memcpy(&req->secinfo, secinfo, sizeof(*secinfo));
req->encl = encl;
req->encl_page = encl_page;
@@ -532,7 +533,7 @@ static int sgx_encl_add_page(struct sgx_encl *encl, unsigned long addr,
goto err_out_unlock;
}
 
- encl_page = sgx_encl_page_alloc(encl, addr, prot);
+ encl_page = sgx_encl_page_alloc(encl, addr, prot, page_type);
if (IS_ERR(encl_page)) {
ret = PTR_ERR(encl_page);
goto err_out_shrink;
Set SGX_ENCL_PAGE_TCS when encl_page->desc is initialized in
sgx_encl_page_alloc() to improve readability, and so that the code
isn't affected when the bulk of __sgx_encl_add_page() is rewritten to
remove the EADD worker in a future patch.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kernel/cpu/sgx/driver/ioctl.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
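For reviewers, a minimal sketch of sgx_encl_page_alloc() as it reads with
this patch applied, pieced together from the hunks above; the kzalloc()
call and the radix tree error unwind are not visible in the diff and are
assumptions about the surrounding code:

	/*
	 * Sketch only: lines not shown in the hunks above (allocation,
	 * error unwind) are assumed, not taken from this patch.
	 */
	static struct sgx_encl_page *sgx_encl_page_alloc(struct sgx_encl *encl,
							 unsigned long addr,
							 unsigned long prot,
							 u64 page_type)
	{
		struct sgx_encl_page *encl_page;
		int ret;

		encl_page = kzalloc(sizeof(*encl_page), GFP_KERNEL); /* assumed */
		if (!encl_page)
			return ERR_PTR(-ENOMEM);

		encl_page->desc = addr;
		/* Tag TCS pages at initialization instead of in the EADD path. */
		if (page_type == SGX_SECINFO_TCS)
			encl_page->desc |= SGX_ENCL_PAGE_TCS;
		encl_page->encl = encl;
		encl_page->vm_prot_bits = calc_vm_prot_bits(prot, 0);

		ret = radix_tree_insert(&encl->page_tree, PFN_DOWN(encl_page->desc),
					encl_page);
		if (ret) { /* assumed unwind */
			kfree(encl_page);
			return ERR_PTR(ret);
		}

		return encl_page;
	}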
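A note on why stashing the flag in desc is safe: addr is page-aligned, so
the low PAGE_SHIFT bits of encl_page->desc are free to carry flags, and
PFN_DOWN() discards them when computing the radix tree index, so setting
SGX_ENCL_PAGE_TCS never perturbs the lookup key. A hypothetical
illustration follows; the BIT(0) position and the helper names are
assumptions, not necessarily the driver's actual definitions:

	/* Hypothetical: exact bit position and helper names are assumed. */
	#define SGX_ENCL_PAGE_TCS	BIT(0)

	static inline bool sgx_encl_page_is_tcs(const struct sgx_encl_page *page)
	{
		return page->desc & SGX_ENCL_PAGE_TCS;
	}

	static inline unsigned long sgx_encl_page_addr(const struct sgx_encl_page *page)
	{
		return page->desc & PAGE_MASK;	/* strip flag bits */
	}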