
[bpf-next,1/2] Patch to Fix deadlocks in queue and stack maps

Message ID: 20240429165658.1305969-1-sidchintamaneni@gmail.com
State: Changes Requested
Delegated to: BPF
Series: [bpf-next,1/2] Patch to Fix deadlocks in queue and stack maps

Checks

Context Check Description
bpf/vmtest-bpf-next-PR fail PR summary
netdev/series_format success Single patches do not need cover letters
netdev/tree_selection success Clearly marked for bpf-next
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 945 this patch: 945
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers fail 1 blamed authors not CCed: song@kernel.org; 8 maintainers not CCed: kpsingh@kernel.org john.fastabend@gmail.com jolsa@kernel.org eddyz87@gmail.com song@kernel.org martin.lau@linux.dev haoluo@google.com sdf@google.com
netdev/build_clang success Errors and warnings before: 955 this patch: 955
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success Fixes tag looks correct
netdev/build_allmodconfig_warn success Errors and warnings before: 956 this patch: 956
netdev/checkpatch warning CHECK: Alignment should match open parenthesis CHECK: Please don't use multiple blank lines CHECK: Unnecessary parentheses around 'qs->map_locked' CHECK: Unnecessary parentheses around qs->map_locked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0
bpf/vmtest-bpf-next-VM_Test-1 success Logs for ShellCheck
bpf/vmtest-bpf-next-VM_Test-2 success Logs for Unittests
bpf/vmtest-bpf-next-VM_Test-3 success Logs for Validate matrix.py
bpf/vmtest-bpf-next-VM_Test-5 success Logs for aarch64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-0 success Logs for Lint
bpf/vmtest-bpf-next-VM_Test-4 success Logs for aarch64-gcc / build / build for aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-12 success Logs for s390x-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-10 success Logs for aarch64-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-11 success Logs for s390x-gcc / build / build for s390x with gcc
bpf/vmtest-bpf-next-VM_Test-17 success Logs for s390x-gcc / veristat
bpf/vmtest-bpf-next-VM_Test-18 success Logs for set-matrix
bpf/vmtest-bpf-next-VM_Test-19 success Logs for x86_64-gcc / build / build for x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-20 success Logs for x86_64-gcc / build-release
bpf/vmtest-bpf-next-VM_Test-28 success Logs for x86_64-llvm-17 / build / build for x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-34 success Logs for x86_64-llvm-17 / veristat
bpf/vmtest-bpf-next-VM_Test-36 success Logs for x86_64-llvm-18 / build-release / build for x86_64 with llvm-18 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-35 success Logs for x86_64-llvm-18 / build / build for x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-42 success Logs for x86_64-llvm-18 / veristat
bpf/vmtest-bpf-next-VM_Test-29 success Logs for x86_64-llvm-17 / build-release / build for x86_64 with llvm-17 and -O2 optimization
bpf/vmtest-bpf-next-VM_Test-9 success Logs for aarch64-gcc / test (test_verifier, false, 360) / test_verifier on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-6 fail Logs for aarch64-gcc / test (test_maps, false, 360) / test_maps on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-13 fail Logs for s390x-gcc / test (test_maps, false, 360) / test_maps on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-16 success Logs for s390x-gcc / test (test_verifier, false, 360) / test_verifier on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-7 fail Logs for aarch64-gcc / test (test_progs, false, 360) / test_progs on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-8 fail Logs for aarch64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on aarch64 with gcc
bpf/vmtest-bpf-next-VM_Test-14 fail Logs for s390x-gcc / test (test_progs, false, 360) / test_progs on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-15 fail Logs for s390x-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on s390x with gcc
bpf/vmtest-bpf-next-VM_Test-24 success Logs for x86_64-gcc / test (test_progs_no_alu32_parallel, true, 30) / test_progs_no_alu32_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-26 success Logs for x86_64-gcc / test (test_verifier, false, 360) / test_verifier on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-25 success Logs for x86_64-gcc / test (test_progs_parallel, true, 30) / test_progs_parallel on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-27 success Logs for x86_64-gcc / veristat / veristat on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-37 fail Logs for x86_64-llvm-18 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-30 fail Logs for x86_64-llvm-17 / test (test_maps, false, 360) / test_maps on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-33 success Logs for x86_64-llvm-17 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-21 fail Logs for x86_64-gcc / test (test_maps, false, 360) / test_maps on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-41 success Logs for x86_64-llvm-18 / test (test_verifier, false, 360) / test_verifier on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-22 fail Logs for x86_64-gcc / test (test_progs, false, 360) / test_progs on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-23 fail Logs for x86_64-gcc / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with gcc
bpf/vmtest-bpf-next-VM_Test-31 fail Logs for x86_64-llvm-17 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-32 fail Logs for x86_64-llvm-17 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-17
bpf/vmtest-bpf-next-VM_Test-40 fail Logs for x86_64-llvm-18 / test (test_progs_no_alu32, false, 360) / test_progs_no_alu32 on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-38 fail Logs for x86_64-llvm-18 / test (test_progs, false, 360) / test_progs on x86_64 with llvm-18
bpf/vmtest-bpf-next-VM_Test-39 fail Logs for x86_64-llvm-18 / test (test_progs_cpuv4, false, 360) / test_progs_cpuv4 on x86_64 with llvm-18

Commit Message

Siddharth Chintamaneni April 29, 2024, 4:56 p.m. UTC
From: Siddharth Chintamaneni <sidchintamaneni@vt.edu>

This patch addresses a possible deadlock in the queue and
stack map types.

A deadlock can occur when a nested BPF program tries to
acquire the same lock its parent BPF program already holds
in order to perform a write operation on the same map. This
bug was also reported by syzbot.
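
For illustration, the nesting looks roughly like the sketch
below (the attach points are hypothetical, chosen only so that
prog_b fires on the same CPU while prog_a still holds qs->lock;
this is not the syzbot reproducer):

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_QUEUE);
		__uint(max_entries, 8);
		__type(value, __u64);
	} q SEC(".maps");

	SEC("kprobe/__x64_sys_getpid")
	int prog_a(void *ctx)
	{
		__u64 v = 0;

		bpf_map_push_elem(&q, &v, 0); /* takes qs->lock */
		return 0;
	}

	/* hypothetical attach point that runs under qs->lock */
	SEC("kprobe/hypothetical_func_called_under_qs_lock")
	int prog_b(void *ctx)
	{
		__u64 v = 1;

		/* same map, same CPU, lock already held: deadlock */
		bpf_map_push_elem(&q, &v, 0);
		return 0;
	}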

Link: https://lore.kernel.org/lkml/0000000000004c3fc90615f37756@google.com/
Reported-by: syzbot+8bdfc2c53fb2b63e1871@syzkaller.appspotmail.com
Fixes: f1a2e44a3aec ("bpf: add queue and stack maps")
Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@vt.edu>
---
 kernel/bpf/queue_stack_maps.c | 42 +++++++++++++++++++++++++++++++++++
 1 file changed, 42 insertions(+)

Comments

Kumar Kartikeya Dwivedi April 29, 2024, 5:46 p.m. UTC | #1
On Mon, 29 Apr 2024 at 18:57, Siddharth Chintamaneni
<sidchintamaneni@gmail.com> wrote:
>
> From: Siddharth Chintamaneni <sidchintamaneni@vt.edu>
>
> This patch addresses a possible deadlock in the queue and
> stack map types.
>
> A deadlock can occur when a nested BPF program tries to
> acquire the same lock its parent BPF program already holds
> in order to perform a write operation on the same map. This
> bug was also reported by syzbot.
>
> Link: https://lore.kernel.org/lkml/0000000000004c3fc90615f37756@google.com/
> Reported-by: syzbot+8bdfc2c53fb2b63e1871@syzkaller.appspotmail.com
> Fixes: f1a2e44a3aec ("bpf: add queue and stack maps")
> Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@vt.edu>
> ---
>  kernel/bpf/queue_stack_maps.c | 42 +++++++++++++++++++++++++++++++++++
>  1 file changed, 42 insertions(+)
>
> diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
> index d869f51ea93a..4b7df1a53cf2 100644
> --- a/kernel/bpf/queue_stack_maps.c
> +++ b/kernel/bpf/queue_stack_maps.c
> @@ -18,6 +18,7 @@ struct bpf_queue_stack {
>         raw_spinlock_t lock;
>         u32 head, tail;
>         u32 size; /* max_entries + 1 */
> +       int __percpu *map_locked;
>
>         char elements[] __aligned(8);
>  };
> @@ -78,6 +79,16 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
>
>         qs->size = size;
>
> +       qs->map_locked = bpf_map_alloc_percpu(&qs->map,
> +                                               sizeof(int),
> +                                               sizeof(int),
> +                                               GFP_USER);

GFP_USER | __GFP_NOWARN, like we do everywhere else.
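
I.e., something like this (sketch):

	qs->map_locked = bpf_map_alloc_percpu(&qs->map, sizeof(int),
					      sizeof(int),
					      GFP_USER | __GFP_NOWARN);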

> +       if (!qs->map_locked) {
> +               bpf_map_area_free(qs);
> +               return ERR_PTR(-ENOMEM);
> +       }
> +
> +
>         raw_spin_lock_init(&qs->lock);
>
>         return &qs->map;
> @@ -98,6 +109,16 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
>         int err = 0;
>         void *ptr;
>
> +       preempt_disable();
> +       local_irq_save(flags);
> +       if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
> +               __this_cpu_dec(*(qs->map_locked));
> +               local_irq_restore(flags);
> +               preempt_enable();
> +               return -EBUSY;
> +       }
> +       preempt_enable();
> +

You increment the map_locked counter, but never decrement it after
unlock; likewise in all the other cases. As a result, this operation
cannot be called again on a given CPU for a given map after the first
time, which is probably why CI is also failing:
https://github.com/kernel-patches/bpf/actions/runs/8882578097/job/24387802831?pr=6915
returns -16 (EBUSY).
E.g. check hashtab.c: it does __this_cpu_dec() after unlock in
htab_unlock_bucket().
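
For reference, a minimal sketch of what paired helpers modeled on
htab_lock_bucket()/htab_unlock_bucket() could look like here (the
helper names are illustrative, not from the patch):

	static inline int queue_stack_map_lock(struct bpf_queue_stack *qs,
					       unsigned long *pflags)
	{
		unsigned long flags;

		preempt_disable();
		local_irq_save(flags);

		if (unlikely(__this_cpu_inc_return(*qs->map_locked) != 1)) {
			__this_cpu_dec(*qs->map_locked);
			local_irq_restore(flags);
			preempt_enable();
			return -EBUSY;
		}

		raw_spin_lock(&qs->lock);
		*pflags = flags;
		return 0;
	}

	static inline void queue_stack_map_unlock(struct bpf_queue_stack *qs,
						  unsigned long flags)
	{
		raw_spin_unlock(&qs->lock);
		/* decrement only after the lock is dropped */
		__this_cpu_dec(*qs->map_locked);
		local_irq_restore(flags);
		preempt_enable();
	}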

>  [...]
>
>
Siddharth Chintamaneni April 29, 2024, 5:51 p.m. UTC | #2
On Mon, 29 Apr 2024 at 13:47, Kumar Kartikeya Dwivedi <memxor@gmail.com> wrote:
>
> On Mon, 29 Apr 2024 at 18:57, Siddharth Chintamaneni
> <sidchintamaneni@gmail.com> wrote:
> >
> > From: Siddharth Chintamaneni <sidchintamaneni@vt.edu>
> >
> > This patch addresses a possible deadlock in the queue and
> > stack map types.
> >
> > A deadlock can occur when a nested BPF program tries to
> > acquire the same lock its parent BPF program already holds
> > in order to perform a write operation on the same map. This
> > bug was also reported by syzbot.
> >
> > Link: https://lore.kernel.org/lkml/0000000000004c3fc90615f37756@google.com/
> > Reported-by: syzbot+8bdfc2c53fb2b63e1871@syzkaller.appspotmail.com
> > Fixes: f1a2e44a3aec ("bpf: add queue and stack maps")
> > Signed-off-by: Siddharth Chintamaneni <sidchintamaneni@vt.edu>
> > ---
> >  kernel/bpf/queue_stack_maps.c | 42 +++++++++++++++++++++++++++++++++++
> >  1 file changed, 42 insertions(+)
> >
> > diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
> > index d869f51ea93a..4b7df1a53cf2 100644
> > --- a/kernel/bpf/queue_stack_maps.c
> > +++ b/kernel/bpf/queue_stack_maps.c
> > @@ -18,6 +18,7 @@ struct bpf_queue_stack {
> >         raw_spinlock_t lock;
> >         u32 head, tail;
> >         u32 size; /* max_entries + 1 */
> > +       int __percpu *map_locked;
> >
> >         char elements[] __aligned(8);
> >  };
> > @@ -78,6 +79,16 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
> >
> >         qs->size = size;
> >
> > +       qs->map_locked = bpf_map_alloc_percpu(&qs->map,
> > +                                               sizeof(int),
> > +                                               sizeof(int),
> > +                                               GFP_USER);
>
> GFP_USER | __GFP_NOWARN, like we do everywhere else.
>
> > +       if (!qs->map_locked) {
> > +               bpf_map_area_free(qs);
> > +               return ERR_PTR(-ENOMEM);
> > +       }
> > +
> > +
> >         raw_spin_lock_init(&qs->lock);
> >
> >         return &qs->map;
> > @@ -98,6 +109,16 @@ static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
> >         int err = 0;
> >         void *ptr;
> >
> > +       preempt_disable();
> > +       local_irq_save(flags);
> > +       if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
> > +               __this_cpu_dec(*(qs->map_locked));
> > +               local_irq_restore(flags);
> > +               preempt_enable();
> > +               return -EBUSY;
> > +       }
> > +       preempt_enable();
> > +
>
> You increment the map_locked counter, but never decrement it after
> unlock; likewise in all the other cases. As a result, this operation
> cannot be called again on a given CPU for a given map after the first
> time, which is probably why CI is also failing:
> https://github.com/kernel-patches/bpf/actions/runs/8882578097/job/24387802831?pr=6915
> returns -16 (EBUSY).
> E.g. check hashtab.c: it does __this_cpu_dec() after unlock in
> htab_unlock_bucket().
>

My bad, I sent the wrong patch; I will send a revised version.

> >  [...]
> >
> >
Dan Carpenter May 4, 2024, 12:22 p.m. UTC | #3
Hi Siddharth,

kernel test robot noticed the following build warnings:

url:    https://github.com/intel-lab-lkp/linux/commits/Siddharth-Chintamaneni/Added-selftests-to-check-deadlocks-in-queue-and-stack-map/20240430-142201
base:   https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git master
patch link:    https://lore.kernel.org/r/20240429165658.1305969-1-sidchintamaneni%40gmail.com
patch subject: [PATCH bpf-next 1/2] Patch to Fix deadlocks in queue and stack maps
config: i386-randconfig-141-20240504 (https://download.01.org/0day-ci/archive/20240504/202405041108.2Up5HT0H-lkp@intel.com/config)
compiler: clang version 18.1.4 (https://github.com/llvm/llvm-project e6c3289804a67ea0bb6a86fadbe454dd93b8d855)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
| Closes: https://lore.kernel.org/r/202405041108.2Up5HT0H-lkp@intel.com/

smatch warnings:
kernel/bpf/queue_stack_maps.c:273 queue_stack_map_push_elem() warn: inconsistent returns 'irq_flags'.

vim +/irq_flags +273 kernel/bpf/queue_stack_maps.c

f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  219  
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  220  /* Called from syscall or from eBPF program */
d7ba4cc900bf1e JP Kobryn              2023-03-22  221  static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  222  				      u64 flags)
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  223  {
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  224  	struct bpf_queue_stack *qs = bpf_queue_stack(map);
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  225  	unsigned long irq_flags;
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  226  	int err = 0;
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  227  	void *dst;
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  228  
568ce03b978beb Siddharth Chintamaneni 2024-04-29  229  	preempt_disable();
568ce03b978beb Siddharth Chintamaneni 2024-04-29  230  	local_irq_save(irq_flags);
568ce03b978beb Siddharth Chintamaneni 2024-04-29  231  	if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
568ce03b978beb Siddharth Chintamaneni 2024-04-29  232  		__this_cpu_dec(*(qs->map_locked));
568ce03b978beb Siddharth Chintamaneni 2024-04-29  233  		local_irq_restore(irq_flags);
568ce03b978beb Siddharth Chintamaneni 2024-04-29  234  		preempt_enable();
568ce03b978beb Siddharth Chintamaneni 2024-04-29  235  		return -EBUSY;
568ce03b978beb Siddharth Chintamaneni 2024-04-29  236  	}
568ce03b978beb Siddharth Chintamaneni 2024-04-29  237  	preempt_enable();
568ce03b978beb Siddharth Chintamaneni 2024-04-29  238  
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  239  	/* BPF_EXIST is used to force making room for a new element in case the
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  240  	 * map is full
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  241  	 */
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  242  	bool replace = (flags & BPF_EXIST);
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  243  
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  244  	/* Check supported flags for queue and stack maps */
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  245  	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  246  		return -EINVAL;

local_irq_restore(irq_flags) is needed before returning here.

f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  247  
a34a9f1a19afe9 Toke Høiland-Jørgensen 2023-09-11  248  	if (in_nmi()) {
a34a9f1a19afe9 Toke Høiland-Jørgensen 2023-09-11  249  		if (!raw_spin_trylock_irqsave(&qs->lock, irq_flags))


_irqsave can't be nested.  Has this code been tested?  Perhaps it works
because the callers always call this with IRQs disabled.

a34a9f1a19afe9 Toke Høiland-Jørgensen 2023-09-11  250  			return -EBUSY;
a34a9f1a19afe9 Toke Høiland-Jørgensen 2023-09-11  251  	} else {
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  252  		raw_spin_lock_irqsave(&qs->lock, irq_flags);
a34a9f1a19afe9 Toke Høiland-Jørgensen 2023-09-11  253  	}
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  254  
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  255  	if (queue_stack_map_is_full(qs)) {
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  256  		if (!replace) {
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  257  			err = -E2BIG;
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  258  			goto out;
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  259  		}
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  260  		/* advance tail pointer to overwrite oldest element */
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  261  		if (unlikely(++qs->tail >= qs->size))
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  262  			qs->tail = 0;
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  263  	}
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  264  
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  265  	dst = &qs->elements[qs->head * qs->map.value_size];
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  266  	memcpy(dst, value, qs->map.value_size);
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  267  
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  268  	if (unlikely(++qs->head >= qs->size))
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  269  		qs->head = 0;
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  270  
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  271  out:
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  272  	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18 @273  	return err;
f1a2e44a3aeccb Mauricio Vasquez B     2018-10-18  274  }
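
Combining this with the missing-decrement issue noted earlier in the
thread, the push path could be restructured along these lines (a
sketch only, using the hypothetical queue_stack_map_lock()/
queue_stack_map_unlock() helpers from the earlier reply; the per-CPU
guard already rejects same-CPU reentrancy, including from NMI, so the
in_nmi() trylock special case and the nested _irqsave go away):

	static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
					      u64 flags)
	{
		struct bpf_queue_stack *qs = bpf_queue_stack(map);
		bool replace = (flags & BPF_EXIST);
		unsigned long irq_flags;
		int err = 0;
		void *dst;

		/* Check supported flags before taking any lock, so the
		 * -EINVAL path needs no cleanup.
		 */
		if (flags & BPF_NOEXIST || flags > BPF_EXIST)
			return -EINVAL;

		err = queue_stack_map_lock(qs, &irq_flags);
		if (err)
			return err;

		if (queue_stack_map_is_full(qs)) {
			if (!replace) {
				err = -E2BIG;
				goto out;
			}
			/* advance tail pointer to overwrite oldest element */
			if (unlikely(++qs->tail >= qs->size))
				qs->tail = 0;
		}

		dst = &qs->elements[qs->head * qs->map.value_size];
		memcpy(dst, value, qs->map.value_size);

		if (unlikely(++qs->head >= qs->size))
			qs->head = 0;

	out:
		queue_stack_map_unlock(qs, irq_flags);
		return err;
	}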

Patch

diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index d869f51ea93a..4b7df1a53cf2 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -18,6 +18,7 @@  struct bpf_queue_stack {
 	raw_spinlock_t lock;
 	u32 head, tail;
 	u32 size; /* max_entries + 1 */
+	int __percpu *map_locked;
 
 	char elements[] __aligned(8);
 };
@@ -78,6 +79,16 @@  static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 
 	qs->size = size;
 
+	qs->map_locked = bpf_map_alloc_percpu(&qs->map,
+						sizeof(int),
+						sizeof(int),
+						GFP_USER);
+	if (!qs->map_locked) {
+		bpf_map_area_free(qs);
+		return ERR_PTR(-ENOMEM);
+	}
+
+
 	raw_spin_lock_init(&qs->lock);
 
 	return &qs->map;
@@ -98,6 +109,16 @@  static long __queue_map_get(struct bpf_map *map, void *value, bool delete)
 	int err = 0;
 	void *ptr;
 
+	preempt_disable();
+	local_irq_save(flags);
+	if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
+		__this_cpu_dec(*(qs->map_locked));
+		local_irq_restore(flags);
+		preempt_enable();
+		return -EBUSY;
+	}
+	preempt_enable();
+
 	if (in_nmi()) {
 		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
 			return -EBUSY;
@@ -133,6 +154,17 @@  static long __stack_map_get(struct bpf_map *map, void *value, bool delete)
 	void *ptr;
 	u32 index;
 
+	preempt_disable();
+	local_irq_save(flags);
+	if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
+		__this_cpu_dec(*(qs->map_locked));
+		local_irq_restore(flags);
+		preempt_enable();
+		return -EBUSY;
+	}
+	preempt_enable();
+
+
 	if (in_nmi()) {
 		if (!raw_spin_trylock_irqsave(&qs->lock, flags))
 			return -EBUSY;
@@ -194,6 +226,16 @@  static long queue_stack_map_push_elem(struct bpf_map *map, void *value,
 	int err = 0;
 	void *dst;
 
+	preempt_disable();
+	local_irq_save(irq_flags);
+	if (unlikely(__this_cpu_inc_return(*(qs->map_locked)) != 1)) {
+		__this_cpu_dec(*(qs->map_locked));
+		local_irq_restore(irq_flags);
+		preempt_enable();
+		return -EBUSY;
+	}
+	preempt_enable();
+
 	/* BPF_EXIST is used to force making room for a new element in case the
 	 * map is full
 	 */