@@ -12,6 +12,7 @@
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
+#include <linux/sched/isolation.h>
#include <asm/cpufeature.h>
#include <asm/mmu_context.h>
@@ -24,10 +25,20 @@ struct asid_bitmap {
unsigned long max;
};
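+/*
+ * ASID generations are tracked per domain: housekeeping CPUs/tasks and
+ * isolated CPUs/tasks roll over independently, so a rollover triggered on
+ * the housekeeping side does not have to disturb isolated CPUs.
+ */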
+enum {
+ ASID_HOUSEKEEPING = 0,
+ ASID_ISOLATION = 1,
+ ASID_TYPE_MAX,
+};
+
+struct asid_domain {
+ atomic64_t asid_generation;
+};
+
static u32 asid_bits;
static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
-static atomic64_t asid_generation;
+static struct asid_domain asid_domain[ASID_TYPE_MAX];
static unsigned long *asid_map;
static DEFINE_PER_CPU(atomic64_t, active_asids);
@@ -36,11 +47,16 @@ static cpumask_t tlb_flush_pending;
static const struct cpumask *asid_housekeeping_mask;
static struct asid_bitmap pinned_asid;
+static struct asid_bitmap isolated_asid;
+
+static int asid_isolation_cmdline;
+static DEFINE_STATIC_KEY_FALSE(asid_isolation_enable);
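+/*
+ * With ASID isolation enabled, a context id is laid out as:
+ *   bits [asid_bits - 1 : 0]  hardware ASID
+ *   bit  asid_bits            ASID_ISOLATION_FLAG (isolated domain)
+ *   higher bits               generation, in steps of ASID_FIRST_VERSION
+ */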
#define ASID_MASK (~GENMASK(asid_bits - 1, 0))
-#define ASID_FIRST_VERSION (1UL << asid_bits)
+#define NUM_USER_ASIDS (1UL << asid_bits)
-#define NUM_USER_ASIDS ASID_FIRST_VERSION
+#define ASID_ISOLATION_FLAG (NUM_USER_ASIDS)
+#define ASID_FIRST_VERSION (NUM_USER_ASIDS << 1)
#define ctxid2asid(asid) ((asid) & ~ASID_MASK)
#define asid2ctxid(asid, genid) ((asid) | (genid))
@@ -94,6 +110,61 @@ static void set_kpti_asid_bits(unsigned long *map)
memset(map, 0xaa, len);
}
+static inline bool is_isolated_asid(u64 asid)
+{
+ /*
+ * ASID 0 never has ASID_ISOLATION_FLAG set, so it is correctly
+ * treated as belonging to the housekeeping domain.
+ */
+ return asid & ASID_ISOLATION_FLAG;
+}
+
+static inline bool on_isolated_cpu(int cpu)
+{
+ return !cpumask_test_cpu(cpu, asid_housekeeping_mask);
+}
+
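+/*
+ * A context id belongs to the isolated domain if it is used on an isolated
+ * CPU or already carries the isolation flag.
+ */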
+static inline int asid_domain_type(u64 asid, unsigned int cpu)
+{
+ if (on_isolated_cpu(cpu) || is_isolated_asid(asid))
+ return ASID_ISOLATION;
+
+ return ASID_HOUSEKEEPING;
+}
+
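+/*
+ * A rollover normally only invalidates the housekeeping domain. Once the
+ * number of isolated ASIDs exceeds the configured threshold, the next
+ * rollover reclaims the isolated domain as well.
+ */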
+static inline int asid_flush_type(void)
+{
+ if (isolated_asid.nr > isolated_asid.max)
+ return ASID_ISOLATION;
+ else
+ return ASID_HOUSEKEEPING;
+}
+
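+/*
+ * Record ASIDs handed out to the isolated domain so that a housekeeping-only
+ * rollover keeps them reserved instead of recycling them.
+ */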
+static void asid_try_to_isolate(u64 asid)
+{
+ if (!static_branch_unlikely(&asid_isolation_enable))
+ return;
+
+ if (!is_isolated_asid(asid))
+ return;
+ if (!__test_and_set_bit(ctxid2asid(asid), isolated_asid.map))
+ isolated_asid.nr++;
+}
+
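+/*
+ * On a housekeeping-only flush, isolated ASIDs stay reserved in asid_map;
+ * when the isolated domain is flushed too, its bookkeeping starts over.
+ */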
+static void update_reserved_asid_bits(void)
+{
+ if (!static_branch_unlikely(&asid_isolation_enable))
+ return;
+
+ if (asid_flush_type() == ASID_HOUSEKEEPING) {
+ bitmap_or(asid_map, asid_map, isolated_asid.map, NUM_USER_ASIDS);
+ } else {
+ bitmap_zero(isolated_asid.map, NUM_USER_ASIDS);
+ isolated_asid.nr = 0;
+ }
+}
+
static void set_reserved_asid_bits(void)
{
if (pinned_asid.map)
@@ -102,23 +173,51 @@ static void set_reserved_asid_bits(void)
set_kpti_asid_bits(asid_map);
else
bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
+
+ update_reserved_asid_bits();
}
static void asid_generation_init(void)
{
- atomic64_set(&asid_generation, ASID_FIRST_VERSION);
+ struct asid_domain *ad;
+
+ ad = &asid_domain[ASID_HOUSEKEEPING];
+ atomic64_set(&ad->asid_generation, ASID_FIRST_VERSION);
+
+ ad = &asid_domain[ASID_ISOLATION];
+ atomic64_set(&ad->asid_generation, ASID_ISOLATION_FLAG);
}
static void flush_generation(void)
{
+ struct asid_domain *ad = &asid_domain[ASID_HOUSEKEEPING];
+
/* We're out of ASIDs, so increment the global generation count */
atomic64_add_return_relaxed(ASID_FIRST_VERSION,
- &asid_generation);
+ &ad->asid_generation);
+
+ if (asid_flush_type() == ASID_ISOLATION) {
+ ad = &asid_domain[ASID_ISOLATION];
+ atomic64_add_return_relaxed(ASID_FIRST_VERSION,
+ &ad->asid_generation);
+ }
}
-static inline u64 asid_read_generation(void)
+static inline u64 asid_read_generation(int type)
{
- return atomic64_read(&asid_generation);
+ struct asid_domain *ad = &asid_domain[type];
+
+ return atomic64_read(&ad->asid_generation);
+}
+
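+/*
+ * Pick the generation that applies to this context id on the current CPU;
+ * with isolation disabled this is always the housekeeping generation.
+ */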
+static inline u64 asid_curr_generation(u64 asid)
+{
+ int type = ASID_HOUSEKEEPING;
+
+ if (static_branch_unlikely(&asid_isolation_enable))
+ type = asid_domain_type(asid, smp_processor_id());
+
+ return asid_read_generation(type);
}
static inline bool asid_match(u64 asid, u64 genid)
@@ -128,12 +227,28 @@ static inline bool asid_match(u64 asid, u64 genid)
static inline bool asid_gen_match(u64 asid)
{
- return asid_match(asid, asid_read_generation());
+ return asid_match(asid, asid_curr_generation(asid));
+}
+
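+/*
+ * A context id allocated in the housekeeping domain may be requested again
+ * from the isolated domain, e.g. after the task moved to an isolated CPU.
+ * If the old id is still current against the housekeeping generation, the
+ * hardware ASID can be kept.
+ */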
+static bool asid_is_migrated(u64 asid, u64 newasid)
+{
+ if (!static_branch_unlikely(&asid_isolation_enable))
+ return false;
+
+ if (!is_isolated_asid(asid) && is_isolated_asid(newasid)) {
+ u64 generation = asid_read_generation(ASID_HOUSEKEEPING);
+
+ return asid_match(asid, generation);
+ }
+ return false;
}
static const struct cpumask *flush_cpumask(void)
{
- return asid_housekeeping_mask;
+ if (asid_flush_type() == ASID_HOUSEKEEPING)
+ return asid_housekeeping_mask;
+
+ return cpu_possible_mask;
}
static void flush_context(void)
@@ -159,6 +274,7 @@ static void flush_context(void)
if (asid == 0)
asid = per_cpu(reserved_asids, i);
__set_bit(ctxid2asid(asid), asid_map);
+ asid_try_to_isolate(asid);
per_cpu(reserved_asids, i) = asid;
}
@@ -193,21 +309,23 @@ static bool check_update_reserved_asid(u64 asid, u64 newasid)
return hit;
}
-static u64 new_context(struct mm_struct *mm)
+static u64 new_context(struct mm_struct *mm, unsigned int cpu)
{
static u32 cur_idx = 1;
u64 asid = atomic64_read(&mm->context.id);
- u64 generation = asid_read_generation();
+ int domain = asid_domain_type(asid, cpu);
+ u64 generation = asid_read_generation(domain);
+ u64 newasid;
if (asid != 0) {
- u64 newasid = asid2ctxid(ctxid2asid(asid), generation);
+ newasid = asid2ctxid(ctxid2asid(asid), generation);
/*
* If our current ASID was active during a rollover, we
* can continue to use it and this was just a false alarm.
*/
if (check_update_reserved_asid(asid, newasid))
- return newasid;
+ goto out;
/*
* If it is pinned, we can keep using it. Note that reserved
@@ -215,14 +333,21 @@ static u64 new_context(struct mm_struct *mm)
* update the generation into the reserved_asids.
*/
if (refcount_read(&mm->context.pinned))
- return newasid;
+ goto out;
/*
* We had a valid ASID in a previous life, so try to re-use
* it if possible.
*/
if (!__test_and_set_bit(ctxid2asid(asid), asid_map))
- return newasid;
+ goto out;
+
+ /*
+ * We still have a valid ASID, but it has migrated from the
+ * housekeeping domain to the isolated domain, so keep re-using it.
+ */
+ if (asid_is_migrated(asid, newasid))
+ goto out;
}
/*
@@ -241,11 +366,14 @@ static u64 new_context(struct mm_struct *mm)
/* We have more ASIDs than CPUs, so this will always succeed */
asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
- generation = asid_read_generation();
+ generation = asid_read_generation(domain);
set_asid:
__set_bit(asid, asid_map);
cur_idx = asid;
- return asid2ctxid(asid, generation);
+ newasid = asid2ctxid(asid, generation);
+out:
+ asid_try_to_isolate(newasid);
+ return newasid;
}
void check_and_switch_context(struct mm_struct *mm)
@@ -282,12 +410,12 @@ void check_and_switch_context(struct mm_struct *mm)
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
/* Check that our ASID belongs to the current generation. */
asid = atomic64_read(&mm->context.id);
+ cpu = smp_processor_id();
if (!asid_gen_match(asid)) {
- asid = new_context(mm);
+ asid = new_context(mm, cpu);
atomic64_set(&mm->context.id, asid);
}
- cpu = smp_processor_id();
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
local_flush_tlb_all();
@@ -327,11 +455,12 @@ unsigned long arm64_mm_context_get(struct mm_struct *mm)
}
if (!asid_gen_match(asid)) {
+ unsigned int cpu = smp_processor_id();
+
/*
* We went through one or more rollover since that ASID was
* used. Ensure that it is still valid, or generate a new one.
*/
- asid = new_context(mm);
+ asid = new_context(mm, cpu);
atomic64_set(&mm->context.id, asid);
}
@@ -430,10 +559,36 @@ static int asids_update_limit(void)
* are pinned, there still is at least one empty slot in the ASID map.
*/
pinned_asid.max = num_available_asids - num_possible_cpus() - 2;
+
+ /*
+ * Users generally do not care about the exact number of ASIDs, so
+ * default the maximum threshold of isolated ASIDs to half of the
+ * available ASIDs.
+ */
+ if (isolated_asid.map)
+ isolated_asid.max = num_available_asids / 2;
+
return 0;
}
arch_initcall(asids_update_limit);
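+/*
+ * ASID isolation only takes effect when requested on the command line and
+ * a housekeeping split exists (HK_TYPE_DOMAIN, e.g. set up via isolcpus=).
+ */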
+static void asid_isolation_init(void)
+{
+ if (asid_isolation_cmdline == 0)
+ return;
+
+ if (!housekeeping_enabled(HK_TYPE_DOMAIN))
+ return;
+
+ isolated_asid.map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
+ if (!isolated_asid.map)
+ return;
+
+ asid_housekeeping_mask = housekeeping_cpumask(HK_TYPE_DOMAIN);
+ static_branch_enable(&asid_isolation_enable);
+ pr_info("ASID isolation enabled\n");
+}
+
static int asids_init(void)
{
asid_bits = get_cpu_asid_bits();
@@ -448,6 +603,7 @@ static int asids_init(void)
pinned_asid.nr = 0;
asid_housekeeping_mask = cpu_possible_mask;
+ asid_isolation_init();
/*
* We cannot call set_reserved_asid_bits() here because CPU
@@ -459,3 +615,10 @@ static int asids_init(void)
return 0;
}
early_initcall(asids_init);
+
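+/*
+ * "asid_isolation" on the command line only records the request;
+ * asid_isolation_init() decides once the housekeeping state is known.
+ */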
+static int __init asid_isolation_setup(char *str)
+{
+ asid_isolation_cmdline = 1;
+ return 1;
+}
+__setup("asid_isolation", asid_isolation_setup);