@@ -40,7 +40,8 @@ struct asi {
pgd_t *pgd;
struct asi_class *class;
struct mm_struct *mm;
- int64_t asi_ref_count;
+ u16 pcid_index;
+ int64_t asi_ref_count;
};
DECLARE_PER_CPU_ALIGNED(struct asi_state, asi_cpu_state);
@@ -260,6 +260,9 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
unsigned long build_cr3(pgd_t *pgd, u16 asid);
+unsigned long build_cr3_pcid(pgd_t *pgd, u16 pcid, bool noflush);
+
+u16 asi_pcid(struct asi *asi, u16 asid);
#endif /* !MODULE */
@@ -335,6 +335,7 @@ int asi_init(struct mm_struct *mm, int asi_index, struct asi **out_asi)
asi->class = &asi_class[asi_index];
asi->mm = mm;
+ asi->pcid_index = asi_index;
if (asi->class->flags & ASI_MAP_STANDARD_NONSENSITIVE) {
uint i;
@@ -386,6 +387,7 @@ EXPORT_SYMBOL_GPL(asi_destroy);
void __asi_enter(void)
{
u64 asi_cr3;
+ u16 pcid;
struct asi *target = this_cpu_read(asi_cpu_state.target_asi);
VM_BUG_ON(preemptible());
@@ -399,8 +401,8 @@ void __asi_enter(void)
this_cpu_write(asi_cpu_state.curr_asi, target);
- asi_cr3 = build_cr3(target->pgd,
- this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+ pcid = asi_pcid(target, this_cpu_read(cpu_tlbstate.loaded_mm_asid));
+ asi_cr3 = build_cr3_pcid(target->pgd, pcid, false);
write_cr3(asi_cr3);
if (target->class->ops.post_asi_enter)
@@ -97,7 +97,12 @@
# define PTI_CONSUMED_PCID_BITS 0
#endif
-#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS)
+#define ASI_CONSUMED_PCID_BITS ASI_MAX_NUM_ORDER
+#define ASI_PCID_BITS_SHIFT CR3_AVAIL_PCID_BITS
+#define CR3_AVAIL_PCID_BITS (X86_CR3_PCID_BITS - PTI_CONSUMED_PCID_BITS - \
+ ASI_CONSUMED_PCID_BITS)
+
+static_assert(TLB_NR_DYN_ASIDS < BIT(CR3_AVAIL_PCID_BITS));
/*
* ASIDs are zero-based: 0->MAX_AVAIL_ASID are valid. -1 below to account
@@ -154,6 +159,34 @@ static inline u16 user_pcid(u16 asid)
return ret;
}
+#ifdef CONFIG_ADDRESS_SPACE_ISOLATION
+
+/*
+ * Compute the PCID for a restricted address space: the regular kernel
+ * PCID for @asid, with the per-instance pcid_index placed in the bits
+ * above CR3_AVAIL_PCID_BITS so each ASI class gets a distinct PCID.
+ */
+u16 asi_pcid(struct asi *asi, u16 asid)
+{
+	return kern_pcid(asid) | (asi->pcid_index << ASI_PCID_BITS_SHIFT);
+}
+
+#else /* CONFIG_ADDRESS_SPACE_ISOLATION */
+
+/* Without ASI, fall back to the plain kernel PCID for this ASID. */
+u16 asi_pcid(struct asi *asi, u16 asid)
+{
+	return kern_pcid(asid);
+}
+
+#endif /* CONFIG_ADDRESS_SPACE_ISOLATION */
+
+/*
+ * Build a CR3 value from a page table root and an explicit PCID.
+ * If the CPU lacks PCID support, the PCID field is forced to 0 and
+ * @noflush is ignored (CR3_NOFLUSH is only meaningful with PCIDs).
+ */
+unsigned long build_cr3_pcid(pgd_t *pgd, u16 pcid, bool noflush)
+{
+	u64 noflush_bit = 0;
+
+	if (!static_cpu_has(X86_FEATURE_PCID))
+		pcid = 0;
+	else if (noflush)
+		noflush_bit = CR3_NOFLUSH;
+
+	return __sme_pa(pgd) | pcid | noflush_bit;
+}
+
+
inline unsigned long build_cr3(pgd_t *pgd, u16 asid)
{
if (static_cpu_has(X86_FEATURE_PCID)) {
@@ -1078,13 +1111,17 @@ unsigned long __get_current_cr3_fast(void)
pgd_t *pgd;
u16 asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
struct asi *asi = asi_get_current();
+ u16 pcid;
- if (asi)
+ if (asi) {
pgd = asi_pgd(asi);
- else
+ pcid = asi_pcid(asi, asid);
+ } else {
pgd = this_cpu_read(cpu_tlbstate.loaded_mm)->pgd;
+ pcid = kern_pcid(asid);
+ }
- cr3 = build_cr3(pgd, asid);
+ cr3 = build_cr3_pcid(pgd, pcid, false);
/* For now, be very restrictive about when this can be called. */
VM_WARN_ON(in_nmi() || preemptible());
Each restricted address space is assigned a separate PCID. Since
currently only one ASI instance per class exists for a given process,
the PCID is just derived from the class index.

This commit only sets the appropriate PCID when switching CR3, but does
not set the NOFLUSH bit. That will be done by later patches.

Signed-off-by: Junaid Shahid <junaids@google.com>
---
 arch/x86/include/asm/asi.h      |  3 ++-
 arch/x86/include/asm/tlbflush.h |  3 +++
 arch/x86/mm/asi.c               |  6 +++--
 arch/x86/mm/tlb.c               | 45 ++++++++++++++++++++++++++++++---
 4 files changed, 50 insertions(+), 7 deletions(-)