@@ -1156,7 +1156,7 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
 	   u64, flags)
 {
 	clockid_t clockid = flags & (MAX_CLOCKS - 1);
-	struct bpf_hrtimer *t;
+	struct bpf_hrtimer *t, *to_free = NULL;
 	int ret = 0;
 
 	BUILD_BUG_ON(MAX_CLOCKS != 16);
@@ -1198,8 +1198,20 @@ BPF_CALL_3(bpf_timer_init, struct bpf_timer_kern *, timer, struct bpf_map *, map
 	rcu_assign_pointer(t->callback_fn, NULL);
 	hrtimer_init(&t->timer, clockid, HRTIMER_MODE_REL_SOFT);
 	t->timer.function = bpf_timer_cb;
-	timer->timer = t;
+	WRITE_ONCE(timer->timer, t);
+	/* Guarantee the order between timer->timer and map->usercnt. So
+	 * when there are concurrent uref release and bpf timer init, either
+	 * bpf_timer_cancel_and_free() called by uref release reads a non-NULL
+	 * timer or the atomic64_read() below reads a zero usercnt.
+	 */
+	smp_mb();
+	if (!atomic64_read(&map->usercnt)) {
+		WRITE_ONCE(timer->timer, NULL);
+		to_free = t;
+		ret = -EPERM;
+	}
 out:
 	__bpf_spin_unlock_irqrestore(&timer->lock);
+	kfree(to_free);
 	return ret;
 }
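
The smp_mb() pairs with a full barrier on the uref-release side: each thread stores to its own location, executes a full barrier, then reads the other thread's location, so at least one of the two must observe the other's store. Below is a minimal user-space sketch of that pairing using C11 atomics. It is not kernel code; the names (install_timer, sweep_timer, timer_ptr, usercnt) are invented for illustration, and it assumes the release side really does execute a matching full barrier before sweeping timers, as the comment in the patch implies.

/* Illustrative user-space sketch of the store -> full-barrier -> load
 * pairing relied on above; all names are invented, this is not the
 * kernel source.
 */
#include <stdatomic.h>
#include <stddef.h>

static _Atomic(void *) timer_ptr;   /* stands in for timer->timer */
static atomic_long usercnt = 1;     /* stands in for map->usercnt */

/* bpf_timer_init() side: publish the timer, then check the refcount. */
static int install_timer(void *t)
{
	atomic_store_explicit(&timer_ptr, t, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);    /* the smp_mb() */
	if (atomic_load_explicit(&usercnt, memory_order_relaxed) == 0) {
		/* Lost the race: unpublish; the caller frees t. */
		atomic_store_explicit(&timer_ptr, NULL, memory_order_relaxed);
		return -1;
	}
	return 0;
}

/* uref-release side: drop the refcount, then sweep any published timer. */
static void *sweep_timer(void)
{
	atomic_store_explicit(&usercnt, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);    /* pairing barrier */
	return atomic_exchange_explicit(&timer_ptr, NULL, memory_order_relaxed);
}

With both fences in place, install_timer() cannot return 0 while sweep_timer() returns NULL: that outcome would require both loads to miss both stores, which the seq_cst fences forbid (the classic store-buffering litmus test). That is exactly the guarantee the patch needs: either init sees the dropped usercnt and frees the timer itself, or the sweep sees the published timer and cancels it.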