@@ -223,8 +223,7 @@ static void sgx_reclaimer_block(struct sgx_epc_page *epc_page)
 	mutex_unlock(&encl->lock);
 }
 
-static int __sgx_encl_ewb(struct sgx_epc_page *epc_page,
-			  struct sgx_va_page *va_page, unsigned int va_offset,
+static int __sgx_encl_ewb(struct sgx_epc_page *epc_page, void *va_slot,
 			  struct sgx_backing *backing)
 {
 	struct sgx_pageinfo pginfo;
@@ -237,8 +236,7 @@ static int __sgx_encl_ewb(struct sgx_epc_page *epc_page,
 	pginfo.metadata = (unsigned long)kmap_atomic(backing->pcmd) +
 			  backing->pcmd_offset;
 
-	ret = __ewb(&pginfo, sgx_epc_addr(epc_page),
-		    sgx_epc_addr(va_page->epc_page) + va_offset);
+	ret = __ewb(&pginfo, sgx_epc_addr(epc_page), va_slot);
 
 	kunmap_atomic((void *)(unsigned long)(pginfo.metadata -
 					      backing->pcmd_offset));
@@ -282,6 +280,7 @@ static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
 	struct sgx_encl *encl = encl_page->encl;
 	struct sgx_va_page *va_page;
 	unsigned int va_offset;
+	void *va_slot;
 	int ret;
 
 	encl_page->desc &= ~SGX_ENCL_PAGE_RECLAIMED;
@@ -289,10 +288,11 @@ static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
 		va_page = list_first_entry(&encl->va_pages, struct sgx_va_page,
 					   list);
 		va_offset = sgx_alloc_va_slot(va_page);
+		va_slot = sgx_epc_addr(va_page->epc_page) + va_offset;
 		if (sgx_va_page_full(va_page))
 			list_move_tail(&va_page->list, &encl->va_pages);
 
-		ret = __sgx_encl_ewb(epc_page, va_page, va_offset, backing);
+		ret = __sgx_encl_ewb(epc_page, va_slot, backing);
 		if (ret == SGX_NOT_TRACKED) {
 			ret = __etrack(sgx_epc_addr(encl->secs.epc_page));
 			if (ret) {
@@ -300,7 +300,7 @@ static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
 					ENCLS_WARN(ret, "ETRACK");
 			}
 
-			ret = __sgx_encl_ewb(epc_page, va_page, va_offset, backing);
+			ret = __sgx_encl_ewb(epc_page, va_slot, backing);
 			if (ret == SGX_NOT_TRACKED) {
 				/*
 				 * Slow path, send IPIs to kick cpus out of the
@@ -311,8 +311,7 @@ static void sgx_encl_ewb(struct sgx_epc_page *epc_page,
 				 */
 				on_each_cpu_mask(sgx_encl_ewb_cpumask(encl),
 						 sgx_ipi_cb, NULL, 1);
-				ret = __sgx_encl_ewb(epc_page, va_page, va_offset,
-						     backing);
+				ret = __sgx_encl_ewb(epc_page, va_slot, backing);
 			}
 		}
 
Now that sgx_epc_addr() is purely a calculation, calculate the VA slot
in sgx_encl_ewb() and pass it to __sgx_encl_ewb() to reduce line
lengths and avoid re-calculating the address on every EWB attempt.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
---
 arch/x86/kernel/cpu/sgx/reclaim.c | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)
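
Note for illustration only (not part of the patch): the change relies on
sgx_epc_addr() being plain pointer arithmetic with no mapping lifetime,
roughly along the lines of the sketch below. The helper and field names
(sgx_epc_section(), section->va, section->pa, page->desc) are assumed
here and may not match the tree exactly.

	/* Sketch: assumed shape of the pure address calculation. */
	static inline void *sgx_epc_addr(struct sgx_epc_page *page)
	{
		struct sgx_epc_section *section = sgx_epc_section(page);

		/*
		 * No kmap/kunmap is involved, so the returned pointer stays
		 * valid and can be computed once in sgx_encl_ewb() and
		 * reused across the ETRACK/IPI retry attempts.
		 */
		return section->va + (page->desc & PAGE_MASK) - section->pa;
	}

Because the result is stable for the lifetime of the EPC page, caching
it in va_slot avoids redoing the calculation on every EWB attempt.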