
[RFC,bpf-next,06/11] bpf: Add bpf_rbtree_{lock,unlock} helpers

Message ID 20220722183438.3319790-7-davemarchevsky@fb.com (mailing list archive)
State RFC
Delegated to: BPF
Series bpf: Introduce rbtree map

Checks

Context                  Check    Description
bpf/vmtest-bpf-next-PR   fail     merge-conflict
netdev/tree_selection    success  Clearly marked for bpf-next, async
netdev/apply             fail     Patch does not apply to bpf-next

Commit Message

Dave Marchevsky July 22, 2022, 6:34 p.m. UTC
These helpers are equivalent to bpf_spin_{lock,unlock}, but the verifier
doesn't try to enforce that no helper calls occur when there's an active
spin lock.

[ TODO: Currently the verifier doesn't do _anything_ spinlock related
when it sees one of these, including setting active_spin_lock. This is
probably too lenient. Also, EXPORT_SYMBOL for internal lock helpers
might not be the best code structure. ]

Future patches will add enforcement of the "rbtree helpers must always be
called when the lock is held" constraint.

Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
---
 include/uapi/linux/bpf.h       | 20 ++++++++++++++++++++
 kernel/bpf/helpers.c           | 12 ++++++++++--
 kernel/bpf/rbtree.c            | 29 +++++++++++++++++++++++++++++
 kernel/bpf/verifier.c          |  2 ++
 tools/include/uapi/linux/bpf.h | 20 ++++++++++++++++++++
 5 files changed, 81 insertions(+), 2 deletions(-)
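
For illustration, a minimal usage sketch from a BPF program might look like the
following. The rbtree map definition and the node helpers come from earlier
patches in this series, so the map name and surrounding code here are
assumptions, not part of this patch:

	struct bpf_spin_lock *lock;

	/* bpf_rbtree_get_lock() returns a pointer to the map's lock */
	lock = bpf_rbtree_get_lock(&rbtree_map);

	bpf_rbtree_lock(lock);
	/* rbtree helpers (e.g. bpf_rbtree_add/find/remove) can be called here:
	 * the verifier does not treat rbtree_lock as an active bpf_spin_lock,
	 * so these calls are not rejected */
	bpf_rbtree_unlock(lock);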

Comments

Alexei Starovoitov Aug. 1, 2022, 9:58 p.m. UTC | #1
On 7/22/22 11:34 AM, Dave Marchevsky wrote:
> These helpers are equivalent to bpf_spin_{lock,unlock}, but the verifier
> doesn't try to enforce that no helper calls occur when there's an active
> spin lock.
> 
> [ TODO: Currently the verifier doesn't do _anything_ spinlock related
> when it sees one of these, including setting active_spin_lock. This is
> probably too lenient. Also, EXPORT_SYMBOL for internal lock helpers
> might not be the best code structure. ]
> 
> Future patches will add enforcement of the "rbtree helpers must always be
> called when the lock is held" constraint.
> 
> Signed-off-by: Dave Marchevsky <davemarchevsky@fb.com>
> ---
>   include/uapi/linux/bpf.h       | 20 ++++++++++++++++++++
>   kernel/bpf/helpers.c           | 12 ++++++++++--
>   kernel/bpf/rbtree.c            | 29 +++++++++++++++++++++++++++++
>   kernel/bpf/verifier.c          |  2 ++
>   tools/include/uapi/linux/bpf.h | 20 ++++++++++++++++++++
>   5 files changed, 81 insertions(+), 2 deletions(-)
> 
> diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
> index c677d92de3bc..d21e2c99ea14 100644
> --- a/include/uapi/linux/bpf.h
> +++ b/include/uapi/linux/bpf.h
> @@ -5391,6 +5391,24 @@ union bpf_attr {
>    *
>    *	Return
>    *		Ptr to lock
> + *
> + * void *bpf_rbtree_lock(struct bpf_spin_lock *lock)
> + *	Description
> + *		Like bpf_spin_lock helper, but use separate helper for now
> + *		as we don't want this helper to have special meaning to the verifier
> + *		so that we can do rbtree helper calls between rbtree_lock/unlock
> + *
> + *	Return
> + *		0
> + *
> + * void *bpf_rbtree_unlock(struct bpf_spin_lock *lock)
> + *	Description
> + *		Like bpf_spin_unlock helper, but use separate helper for now
> + *		as we don't want this helper to have special meaning to the verifier
> + *		so that we can do rbtree helper calls between rbtree_lock/unlock
> + *
> + *	Return
> + *		0
>    */
>   #define __BPF_FUNC_MAPPER(FN)		\
>   	FN(unspec),			\
> @@ -5607,6 +5625,8 @@ union bpf_attr {
>   	FN(rbtree_remove),		\
>   	FN(rbtree_free_node),		\
>   	FN(rbtree_get_lock),		\
> +	FN(rbtree_lock),		\
> +	FN(rbtree_unlock),		\
>   	/* */
>   
>   /* integer value in 'imm' field of BPF_CALL instruction selects which helper
> diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
> index 257a808bb767..fa2dba1dcec8 100644
> --- a/kernel/bpf/helpers.c
> +++ b/kernel/bpf/helpers.c
> @@ -303,7 +303,7 @@ static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
>   
>   static DEFINE_PER_CPU(unsigned long, irqsave_flags);
>   
> -static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
> +inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
>   {
>   	unsigned long flags;
>   
> @@ -311,6 +311,7 @@ static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
>   	__bpf_spin_lock(lock);
>   	__this_cpu_write(irqsave_flags, flags);
>   }
> +EXPORT_SYMBOL(__bpf_spin_lock_irqsave);

What is it for?
It's not used from modules.
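
One alternative (an assumption, not something in this patch) would be to drop
the EXPORT_SYMBOL and extern declarations and instead declare the helpers in a
BPF-internal header, since kernel/bpf/rbtree.c is built into the kernel image
rather than built as a module:

	/* e.g. in a shared bpf-internal header (hypothetical placement) */
	void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock);
	void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock);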

>   
>   notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
>   {
> @@ -325,7 +326,7 @@ const struct bpf_func_proto bpf_spin_lock_proto = {
>   	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
>   };
>   
> -static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
> +inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
>   {
>   	unsigned long flags;
>   
> @@ -333,6 +334,7 @@ static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
>   	__bpf_spin_unlock(lock);
>   	local_irq_restore(flags);
>   }
> +EXPORT_SYMBOL(__bpf_spin_unlock_irqrestore);
>   
>   notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
>   {
> @@ -1588,6 +1590,8 @@ const struct bpf_func_proto bpf_rbtree_find_proto __weak;
>   const struct bpf_func_proto bpf_rbtree_remove_proto __weak;
>   const struct bpf_func_proto bpf_rbtree_free_node_proto __weak;
>   const struct bpf_func_proto bpf_rbtree_get_lock_proto __weak;
> +const struct bpf_func_proto bpf_rbtree_lock_proto __weak;
> +const struct bpf_func_proto bpf_rbtree_unlock_proto __weak;
>   
>   const struct bpf_func_proto *
>   bpf_base_func_proto(enum bpf_func_id func_id)
> @@ -1689,6 +1693,10 @@ bpf_base_func_proto(enum bpf_func_id func_id)
>   		return &bpf_rbtree_free_node_proto;
>   	case BPF_FUNC_rbtree_get_lock:
>   		return &bpf_rbtree_get_lock_proto;
> +	case BPF_FUNC_rbtree_lock:
> +		return &bpf_rbtree_lock_proto;
> +	case BPF_FUNC_rbtree_unlock:
> +		return &bpf_rbtree_unlock_proto;
>   	default:
>   		break;
>   	}
> diff --git a/kernel/bpf/rbtree.c b/kernel/bpf/rbtree.c
> index c6f0a2a083f6..bf2e30af82ec 100644
> --- a/kernel/bpf/rbtree.c
> +++ b/kernel/bpf/rbtree.c
> @@ -262,6 +262,35 @@ const struct bpf_func_proto bpf_rbtree_get_lock_proto = {
>   	.arg1_type = ARG_CONST_MAP_PTR,
>   };
>   
> +extern void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock);
> +extern void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock);
> +
> +BPF_CALL_1(bpf_rbtree_lock, void *, lock)
> +{
> +	__bpf_spin_lock_irqsave((struct bpf_spin_lock *)lock);
> +	return 0;
> +}

It doesn't have to be bpf_spin_lock.
A normal spin_lock will do.
bpf_spin_lock has a specific size requirement, so that when it's used inside
a map value the value size doesn't change from kernel to kernel.
Since this lock is hidden, it can be any lock.

Also, it needs to remember the current task or something similar.
Just checking spin_is_locked() from bpf_rbtree_add() is not enough.
Instead of remembering current, we could pass a hidden 'prog' pointer and
remember that in bpf_rbtree_lock.
That hidden prog ptr could also be passed to add/remove/find and compared.
But that's probably overkill. Current task should be fine.
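
To make the suggestion concrete, a rough sketch (an assumption, not part of
this series) of a hidden internal lock plus owner tracking could look like the
following; the struct bpf_rbtree layout and the change of the helper argument
from the lock pointer to the map pointer are illustrative:

	struct bpf_rbtree {
		struct bpf_map map;
		struct rb_root_cached root;
		raw_spinlock_t lock;            /* hidden lock; need not be bpf_spin_lock */
		unsigned long irq_flags;        /* flags saved while the lock is held */
		struct task_struct *lock_owner; /* task that currently holds the lock */
	};

	BPF_CALL_1(bpf_rbtree_lock, struct bpf_map *, map)
	{
		struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
		unsigned long flags;

		raw_spin_lock_irqsave(&tree->lock, flags);
		tree->irq_flags = flags;
		tree->lock_owner = current;
		return 0;
	}

	BPF_CALL_1(bpf_rbtree_unlock, struct bpf_map *, map)
	{
		struct bpf_rbtree *tree = container_of(map, struct bpf_rbtree, map);
		unsigned long flags = tree->irq_flags;

		tree->lock_owner = NULL;
		raw_spin_unlock_irqrestore(&tree->lock, flags);
		return 0;
	}

	/* bpf_rbtree_add/remove/find can then verify ownership instead of only
	 * checking spin_is_locked():
	 *
	 *	if (tree->lock_owner != current)
	 *		return ...;  (reject: caller does not hold the lock)
	 */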

Patch

diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index c677d92de3bc..d21e2c99ea14 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -5391,6 +5391,24 @@  union bpf_attr {
  *
  *	Return
  *		Ptr to lock
+ *
+ * void *bpf_rbtree_lock(struct bpf_spin_lock *lock)
+ *	Description
+ *		Like bpf_spin_lock helper, but use separate helper for now
+ *		as we don't want this helper to have special meaning to the verifier
+ *		so that we can do rbtree helper calls between rbtree_lock/unlock
+ *
+ *	Return
+ *		0
+ *
+ * void *bpf_rbtree_unlock(struct bpf_spin_lock *lock)
+ *	Description
+ *		Like bpf_spin_unlock helper, but use separate helper for now
+ *		as we don't want this helper to have special meaning to the verifier
+ *		so that we can do rbtree helper calls between rbtree_lock/unlock
+ *
+ *	Return
+ *		0
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5607,6 +5625,8 @@  union bpf_attr {
 	FN(rbtree_remove),		\
 	FN(rbtree_free_node),		\
 	FN(rbtree_get_lock),		\
+	FN(rbtree_lock),		\
+	FN(rbtree_unlock),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 257a808bb767..fa2dba1dcec8 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -303,7 +303,7 @@  static inline void __bpf_spin_unlock(struct bpf_spin_lock *lock)
 
 static DEFINE_PER_CPU(unsigned long, irqsave_flags);
 
-static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
+inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
 {
 	unsigned long flags;
 
@@ -311,6 +311,7 @@  static inline void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock)
 	__bpf_spin_lock(lock);
 	__this_cpu_write(irqsave_flags, flags);
 }
+EXPORT_SYMBOL(__bpf_spin_lock_irqsave);
 
 notrace BPF_CALL_1(bpf_spin_lock, struct bpf_spin_lock *, lock)
 {
@@ -325,7 +326,7 @@  const struct bpf_func_proto bpf_spin_lock_proto = {
 	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
 };
 
-static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
+inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
 {
 	unsigned long flags;
 
@@ -333,6 +334,7 @@  static inline void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock)
 	__bpf_spin_unlock(lock);
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL(__bpf_spin_unlock_irqrestore);
 
 notrace BPF_CALL_1(bpf_spin_unlock, struct bpf_spin_lock *, lock)
 {
@@ -1588,6 +1590,8 @@  const struct bpf_func_proto bpf_rbtree_find_proto __weak;
 const struct bpf_func_proto bpf_rbtree_remove_proto __weak;
 const struct bpf_func_proto bpf_rbtree_free_node_proto __weak;
 const struct bpf_func_proto bpf_rbtree_get_lock_proto __weak;
+const struct bpf_func_proto bpf_rbtree_lock_proto __weak;
+const struct bpf_func_proto bpf_rbtree_unlock_proto __weak;
 
 const struct bpf_func_proto *
 bpf_base_func_proto(enum bpf_func_id func_id)
@@ -1689,6 +1693,10 @@  bpf_base_func_proto(enum bpf_func_id func_id)
 		return &bpf_rbtree_free_node_proto;
 	case BPF_FUNC_rbtree_get_lock:
 		return &bpf_rbtree_get_lock_proto;
+	case BPF_FUNC_rbtree_lock:
+		return &bpf_rbtree_lock_proto;
+	case BPF_FUNC_rbtree_unlock:
+		return &bpf_rbtree_unlock_proto;
 	default:
 		break;
 	}
diff --git a/kernel/bpf/rbtree.c b/kernel/bpf/rbtree.c
index c6f0a2a083f6..bf2e30af82ec 100644
--- a/kernel/bpf/rbtree.c
+++ b/kernel/bpf/rbtree.c
@@ -262,6 +262,35 @@  const struct bpf_func_proto bpf_rbtree_get_lock_proto = {
 	.arg1_type = ARG_CONST_MAP_PTR,
 };
 
+extern void __bpf_spin_unlock_irqrestore(struct bpf_spin_lock *lock);
+extern void __bpf_spin_lock_irqsave(struct bpf_spin_lock *lock);
+
+BPF_CALL_1(bpf_rbtree_lock, void *, lock)
+{
+	__bpf_spin_lock_irqsave((struct bpf_spin_lock *)lock);
+	return 0;
+}
+
+const struct bpf_func_proto bpf_rbtree_lock_proto = {
+	.func = bpf_rbtree_lock,
+	.gpl_only = true,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_PTR_TO_SPIN_LOCK,
+};
+
+BPF_CALL_1(bpf_rbtree_unlock, void *, lock)
+{
+	__bpf_spin_unlock_irqrestore((struct bpf_spin_lock *)lock);
+	return 0;
+}
+
+const struct bpf_func_proto bpf_rbtree_unlock_proto = {
+	.func = bpf_rbtree_unlock,
+	.gpl_only = true,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_PTR_TO_SPIN_LOCK,
+};
+
 BTF_ID_LIST_SINGLE(bpf_rbtree_map_btf_ids, struct, bpf_rbtree)
 const struct bpf_map_ops rbtree_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 535f673882cd..174a355d97fd 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6047,6 +6047,8 @@  static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
 		} else if (meta->func_id == BPF_FUNC_spin_unlock) {
 			if (process_spin_lock(env, regno, false))
 				return -EACCES;
+		} else if (meta->func_id == BPF_FUNC_rbtree_lock ||
+			   meta->func_id == BPF_FUNC_rbtree_unlock) { // Do nothing for now
 		} else {
 			verbose(env, "verifier internal error\n");
 			return -EFAULT;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index c677d92de3bc..d21e2c99ea14 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -5391,6 +5391,24 @@  union bpf_attr {
  *
  *	Return
  *		Ptr to lock
+ *
+ * void *bpf_rbtree_lock(struct bpf_spin_lock *lock)
+ *	Description
+ *		Like bpf_spin_lock helper, but use separate helper for now
+ *		as we don't want this helper to have special meaning to the verifier
+ *		so that we can do rbtree helper calls between rbtree_lock/unlock
+ *
+ *	Return
+ *		0
+ *
+ * void *bpf_rbtree_unlock(struct bpf_spin_lock *lock)
+ *	Description
+ *		Like bpf_spin_unlock helper, but use separate helper for now
+ *		as we don't want this helper to have special meaning to the verifier
+ *		so that we can do rbtree helper calls between rbtree_lock/unlock
+ *
+ *	Return
+ *		0
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
@@ -5607,6 +5625,8 @@  union bpf_attr {
 	FN(rbtree_remove),		\
 	FN(rbtree_free_node),		\
 	FN(rbtree_get_lock),		\
+	FN(rbtree_lock),		\
+	FN(rbtree_unlock),		\
 	/* */
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper