@@ -92,6 +92,9 @@ static void set_reserved_asid_bits(void)
bitmap_clear(asid_map, 0, NUM_USER_ASIDS);
}

+#define asid_gen_match(asid) \
+ (!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits))
+
static void flush_context(void)
{
int i;
@@ -220,8 +223,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
* because atomic RmWs are totally ordered for a given location.
*/
old_active_asid = atomic64_read(&per_cpu(active_asids, cpu));
- if (old_active_asid &&
- !((asid ^ atomic64_read(&asid_generation)) >> asid_bits) &&
+ if (old_active_asid && asid_gen_match(asid) &&
atomic64_cmpxchg_relaxed(&per_cpu(active_asids, cpu),
old_active_asid, asid))
goto switch_mm_fastpath;
@@ -229,7 +231,7 @@ void check_and_switch_context(struct mm_struct *mm, unsigned int cpu)
raw_spin_lock_irqsave(&cpu_asid_lock, flags);
/* Check that our ASID belongs to the current generation. */
asid = atomic64_read(&mm->context.id);
- if ((asid ^ atomic64_read(&asid_generation)) >> asid_bits) {
+ if (!asid_gen_match(asid)) {
asid = new_context(mm);
atomic64_set(&mm->context.id, asid);
}
Add a macro to check if an ASID is from the current generation, since a
subsequent patch will introduce a third user for this test.

Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
 arch/arm64/mm/context.c | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)
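
[Editor's note, not part of the patch: a standalone illustration of the test
that asid_gen_match() encodes. On arm64, mm->context.id keeps the hardware
ASID in its low asid_bits and the rolling generation count in the bits above,
so XOR-ing with asid_generation and shifting out the ASID bits leaves zero
exactly when the generations match. The sketch below reproduces that logic as
plain userspace C, with a made-up ASID_BITS value and an ordinary variable
standing in for the kernel's atomic generation counter.]

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: assume 16-bit ASIDs, generation stored above them. */
#define ASID_BITS	16

/* Stand-in for atomic64_read(&asid_generation) in the kernel code. */
static uint64_t asid_generation = 3ULL << ASID_BITS;

/*
 * Same test as the patch's asid_gen_match(): the XOR cancels the generation
 * bits only when they are equal, so shifting out the ASID bits leaves zero.
 */
static int asid_gen_match(uint64_t asid)
{
	return !((asid ^ asid_generation) >> ASID_BITS);
}

int main(void)
{
	uint64_t current = (3ULL << ASID_BITS) | 0x42;	/* generation 3 */
	uint64_t stale   = (2ULL << ASID_BITS) | 0x42;	/* generation 2 */

	printf("current: %d\n", asid_gen_match(current));	/* prints 1 */
	printf("stale:   %d\n", asid_gen_match(stale));		/* prints 0 */
	return 0;
}

A stale result is what sends check_and_switch_context() down the slow path to
new_context(), which allocates a fresh ASID under cpu_asid_lock.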