@@ -73,7 +73,9 @@ struct asi_session {
enum asi_session_state state; /* state of ASI session */
bool retry_abort; /* always retry abort */
unsigned int abort_depth; /* abort depth */
+ unsigned long isolation_cr3; /* cr3 when ASI is active */
unsigned long original_cr3; /* cr3 before entering ASI */
+ unsigned long original_cr4; /* cr4 before entering ASI */
struct task_struct *task; /* task during isolation */
} __aligned(PAGE_SIZE);
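
The session is page-sized and page-aligned so that each per-cpu instance can be mapped into an ASI page-table on its own page, without dragging in unrelated per-cpu data. As a rough sketch of what such a mapping loop could look like (illustration only; asi_map() and its signature are assumed from this series and may differ):

/*
 * Sketch only, not part of this patch: map every per-cpu ASI session
 * into an ASI page-table so the cached cr3/cr4 values stay reachable
 * while isolation is active.
 */
static int asi_map_sessions(struct asi *asi)
{
	int cpu, err;

	for_each_possible_cpu(cpu) {
		err = asi_map(asi, per_cpu_ptr(&cpu_asi_session, cpu),
			      sizeof(struct asi_session));
		if (err)
			return err;
	}

	return 0;
}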
@@ -14,6 +14,7 @@
#include <asm/paravirt.h>
#include <asm/mpx.h>
#include <asm/debugreg.h>
+#include <asm/asi.h>
extern atomic64_t last_mm_ctx_id;
@@ -347,8 +348,23 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
*/
static inline unsigned long __get_current_cr3_fast(void)
{
- unsigned long cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
- this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+ unsigned long cr3;
+
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+ /*
+ * If isolation is active, cpu_tlbstate isn't necessarily mapped
+ * in the ASI page-table (and it doesn't have the current pgd anyway).
+ * The current CR3 is cached in the CPU ASI session.
+ */
+ if (this_cpu_read(cpu_asi_session.state) == ASI_SESSION_STATE_ACTIVE)
+ cr3 = this_cpu_read(cpu_asi_session.isolation_cr3);
+ else
+ cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
+ this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+#else
+ cr3 = build_cr3(this_cpu_read(cpu_tlbstate.loaded_mm)->pgd,
+ this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+#endif
/* For now, be very restrictive about when this can be called. */
VM_WARN_ON(in_nmi() || preemptible());
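
For reference, a hedged sketch of a typical caller: __get_current_cr3_fast() must be called with preemption disabled (the VM_WARN_ON above enforces this), and with this change it returns the right value whether or not isolation is active. The snippet is illustrative only and not part of this patch:

/*
 * Illustrative caller only: read the current CR3 in a non-preemptible
 * section, e.g. to save and later restore the host CR3. When ASI is
 * active this now returns the cached isolation_cr3 instead of
 * dereferencing cpu_tlbstate.
 */
static unsigned long sample_read_current_cr3(void)
{
	unsigned long flags, cr3;

	local_irq_save(flags);
	cr3 = __get_current_cr3_fast();
	local_irq_restore(flags);

	return cr3;
}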
@@ -12,6 +12,7 @@
#include <asm/invpcid.h>
#include <asm/pti.h>
#include <asm/processor-flags.h>
+#include <asm/asi.h>
/*
* The x86 feature is called PCID (Process Context IDentifier). It is similar
@@ -324,6 +325,15 @@ static inline void cr4_toggle_bits_irqsoff(unsigned long mask)
/* Read the CR4 shadow. */
static inline unsigned long cr4_read_shadow(void)
{
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+ /*
+ * If isolation is active, cpu_tlbstate isn't necessarily mapped
+ * in the ASI page-table. The CR4 value is cached in the CPU
+ * ASI session.
+ */
+ if (this_cpu_read(cpu_asi_session.state) == ASI_SESSION_STATE_ACTIVE)
+ return this_cpu_read(cpu_asi_session.original_cr4);
+#endif
return this_cpu_read(cpu_tlbstate.cr4);
}
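
Callers of cr4_read_shadow() likewise keep working unchanged while isolation is active, since ASI doesn't modify CR4. A hypothetical caller checking a CR4 feature bit might look like the sketch below (illustration only, not part of this patch):

/*
 * Illustration only: test a CR4 bit through the shadow. With ASI
 * active this reads the value cached at isolation entry rather than
 * cpu_tlbstate.cr4.
 */
static bool sample_cr4_has_pcide(void)
{
	return cr4_read_shadow() & X86_CR4_PCIDE;
}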
@@ -23,6 +23,7 @@
/* ASI sessions, one per cpu */
DEFINE_PER_CPU_PAGE_ALIGNED(struct asi_session, cpu_asi_session);
+EXPORT_SYMBOL(cpu_asi_session);
struct asi_map_option {
int flag;
@@ -291,6 +292,8 @@ int asi_enter(struct asi *asi)
goto err_unmap_task;
}
asi_session->original_cr3 = original_cr3;
+ asi_session->original_cr4 = cr4_read_shadow();
+ asi_session->isolation_cr3 = __sme_pa(asi->pgd);
/*
* Use ASI barrier as we are setting CR3 with the ASI page-table.
When address space isolation is active, cpu_tlbstate isn't necessarily
mapped in the ASI page-table, which would cause ASI to fault. Instead of
just mapping cpu_tlbstate, cache the cr3/cr4 values in the ASI session
when entering isolation, and update __get_current_cr3_fast() and
cr4_read_shadow() to return the cached values while ASI is active.

Note that the cached cr3 value is the ASI cr3 value (i.e. the current
CR3 value while ASI is active). The cached cr4 value is the cr4 value
at the time isolation was entered (ASI doesn't change cr4).

Signed-off-by: Alexandre Chartre <alexandre.chartre@oracle.com>
---
 arch/x86/include/asm/asi.h         |  2 ++
 arch/x86/include/asm/mmu_context.h | 20 ++++++++++++++++++--
 arch/x86/include/asm/tlbflush.h    | 10 ++++++++++
 arch/x86/mm/asi.c                  |  3 +++
 4 files changed, 33 insertions(+), 2 deletions(-)
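
For completeness, a simplified sketch of how the exit path is expected to consume the cached original_cr3 (this is not the series' actual asi_exit(); the ASI_SESSION_STATE_INACTIVE name and the omission of abort/interrupt handling are assumptions for illustration):

/*
 * Simplified sketch only: leave isolation by restoring the CR3 value
 * saved in asi_enter(), then mark the session inactive.
 */
static void asi_exit_sketch(struct asi_session *asi_session)
{
	if (asi_session->state != ASI_SESSION_STATE_ACTIVE)
		return;

	write_cr3(asi_session->original_cr3);
	asi_session->state = ASI_SESSION_STATE_INACTIVE;
}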