@@ -162,17 +162,25 @@ static inline int htab_lock_bucket(const struct bpf_htab *htab,
 				   unsigned long *pflags)
 {
 	unsigned long flags;
+	bool use_raw_lock;
 
 	hash = hash & HASHTAB_MAP_LOCK_MASK;
 
-	migrate_disable();
+	use_raw_lock = htab_use_raw_lock(htab);
+	if (use_raw_lock)
+		preempt_disable();
+	else
+		migrate_disable();
 	if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
 		__this_cpu_dec(*(htab->map_locked[hash]));
-		migrate_enable();
+		if (use_raw_lock)
+			preempt_enable();
+		else
+			migrate_enable();
 		return -EBUSY;
 	}
 
-	if (htab_use_raw_lock(htab))
+	if (use_raw_lock)
 		raw_spin_lock_irqsave(&b->raw_lock, flags);
 	else
 		spin_lock_irqsave(&b->lock, flags);
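The reason the raw-lock path now uses preempt_disable() rather than migrate_disable(): since migrate_disable() no longer implies disabled preemption (commit 74d862b682f5, "sched: Make migrate_disable/enable() independent of RT"), a task can be preempted between incrementing the per-cpu map_locked counter and acquiring the bucket lock. A second task scheduled onto the same CPU then finds the counter already non-zero and fails with a spurious -EBUSY. A sketch of the interleaving, reconstructed from the code above rather than quoted from the patch:

	task A (CPU 0)				task B (same CPU)

	htab_lock_bucket()
	  migrate_disable()
	  __this_cpu_inc_return()  /* 1 */
	  <preempted before b->raw_lock>
						htab_lock_bucket()
						  migrate_disable()
						  __this_cpu_inc_return()  /* 2 */
						  return -EBUSY

Disabling preemption closes that window in the raw spinlock case. The non-raw path (PREEMPT_RT, where the bucket lock is a sleeping spinlock) keeps migrate_disable(), because preemption must remain enabled while that lock is taken.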
@@ -185,13 +193,18 @@ static inline void htab_unlock_bucket(const struct bpf_htab *htab,
 				      struct bucket *b, u32 hash,
 				      unsigned long flags)
 {
+	bool use_raw_lock = htab_use_raw_lock(htab);
+
 	hash = hash & HASHTAB_MAP_LOCK_MASK;
-	if (htab_use_raw_lock(htab))
+	if (use_raw_lock)
 		raw_spin_unlock_irqrestore(&b->raw_lock, flags);
 	else
 		spin_unlock_irqrestore(&b->lock, flags);
 	__this_cpu_dec(*(htab->map_locked[hash]));
-	migrate_enable();
+	if (use_raw_lock)
+		preempt_enable();
+	else
+		migrate_enable();
 }
 
 static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);
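For reference, a minimal sketch of how callers are expected to pair the two helpers; htab_update_sketch and the elided hlist update are illustrative only, not part of the patch:

static int htab_update_sketch(struct bpf_htab *htab, struct bucket *b,
			      u32 hash)
{
	unsigned long flags;
	int ret;

	/* Fails with -EBUSY when this CPU already holds the same
	 * map_locked slot, e.g. on re-entry from an NMI or a tracing
	 * program that fires inside the critical section.
	 */
	ret = htab_lock_bucket(htab, b, hash, &flags);
	if (ret)
		return ret;

	/* ... update the bucket's hlist under the lock ... */

	htab_unlock_bucket(htab, b, hash, flags);
	return 0;
}

Note that whichever of preempt_disable()/migrate_disable() was taken in htab_lock_bucket() is released on the same path in htab_unlock_bucket(), which is why both helpers recompute use_raw_lock from the same htab.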