[net-next,v16,06/26] kref/refcount: implement kref_put_sock()

Message ID 20241219-b4-ovpn-v16-6-3e3001153683@openvpn.net (mailing list archive)
State Changes Requested
Delegated to: Netdev Maintainers
Series: Introducing OpenVPN Data Channel Offload

Checks

Context Check Description
netdev/series_format fail Series longer than 15 patches
netdev/tree_selection success Clearly marked for net-next, async
netdev/ynl success Generated files up to date; no warnings/errors; GEN HAS DIFF 2 files changed, 2391 insertions(+);
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 569 this patch: 569
netdev/build_tools success Errors and warnings before: 0 (+23) this patch: 0 (+23)
netdev/cc_maintainers success CCed 6 of 6 maintainers
netdev/build_clang fail Errors and warnings before: 22708 this patch: 22709
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn fail Errors and warnings before: 15931 this patch: 15932
netdev/checkpatch warning CHECK: extern prototypes should be avoided in .h files
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 2 this patch: 2
netdev/source_inline success Was 0 now: 0

Commit Message

Antonio Quartulli Dec. 19, 2024, 1:42 a.m. UTC
Similarly to kref_put_lock(), decrease the refcount
and call bh_lock_sock(sk) if it reached 0.

This kref_put variant comes in handy when one needs
to atomically clean up any socket context along with
dropping the refcount to 0.

Cc: Will Deacon <will@kernel.org> (maintainer:ATOMIC INFRASTRUCTURE)
Cc: Peter Zijlstra <peterz@infradead.org> (maintainer:ATOMIC INFRASTRUCTURE)
Cc: Boqun Feng <boqun.feng@gmail.com> (reviewer:ATOMIC INFRASTRUCTURE)
Cc: Mark Rutland <mark.rutland@arm.com> (reviewer:ATOMIC INFRASTRUCTURE)
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Antonio Quartulli <antonio@openvpn.net>
---
 include/linux/kref.h     | 11 +++++++++++
 include/linux/refcount.h |  3 +++
 lib/refcount.c           | 32 ++++++++++++++++++++++++++++++++
 3 files changed, 46 insertions(+)
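
As a hedged illustration of the intended usage (the context structure
and function names below are hypothetical, not taken from this series):
the release callback runs with bh_lock_sock() already held by
kref_put_sock(), so detaching per-socket state and dropping the last
reference happen atomically with respect to other lockers of the
socket. Note that the release callback is responsible for unlocking.

#include <linux/kref.h>
#include <linux/slab.h>
#include <net/sock.h>

struct demo_sock_ctx {
	struct kref refcount;
	struct sock *sk;
};

static void demo_sock_ctx_release(struct kref *kref)
{
	struct demo_sock_ctx *ctx = container_of(kref, struct demo_sock_ctx,
						 refcount);
	struct sock *sk = ctx->sk;

	/* Entered with bh_lock_sock(sk) held: no concurrent user can
	 * observe a half-torn-down context through the socket.
	 */
	rcu_assign_sk_user_data(sk, NULL);
	bh_unlock_sock(sk);

	/* A real user would likely defer this with kfree_rcu() if
	 * lockless readers dereference sk_user_data.
	 */
	kfree(ctx);
}

static void demo_sock_ctx_put(struct demo_sock_ctx *ctx)
{
	/* Runs the release (under the socket lock) only if the
	 * refcount dropped to 0.
	 */
	kref_put_sock(&ctx->refcount, demo_sock_ctx_release, ctx->sk);
}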

Comments

Will Deacon Dec. 19, 2024, 5:20 p.m. UTC | #1
On Thu, Dec 19, 2024 at 02:42:00AM +0100, Antonio Quartulli wrote:
> Similarly to kref_put_lock(), decrease the refcount
> and call bh_lock_sock(sk) if it reached 0.
> 
> This kref_put variant comes in handy when one needs
> to atomically clean up any socket context along with
> dropping the refcount to 0.
> 
> Cc: Will Deacon <will@kernel.org> (maintainer:ATOMIC INFRASTRUCTURE)
> Cc: Peter Zijlstra <peterz@infradead.org> (maintainer:ATOMIC INFRASTRUCTURE)
> Cc: Boqun Feng <boqun.feng@gmail.com> (reviewer:ATOMIC INFRASTRUCTURE)
> Cc: Mark Rutland <mark.rutland@arm.com> (reviewer:ATOMIC INFRASTRUCTURE)
> Cc: Andrew Morton <akpm@linux-foundation.org>
> Signed-off-by: Antonio Quartulli <antonio@openvpn.net>
> ---
>  include/linux/kref.h     | 11 +++++++++++
>  include/linux/refcount.h |  3 +++
>  lib/refcount.c           | 32 ++++++++++++++++++++++++++++++++

[...]

> diff --git a/lib/refcount.c b/lib/refcount.c
> index a207a8f22b3ca35890671e51c480266d89e4d8d6..76a728581aa49a41ef13f5141f3f2e9816d72e75 100644
> --- a/lib/refcount.c
> +++ b/lib/refcount.c
> @@ -7,6 +7,7 @@
>  #include <linux/refcount.h>
>  #include <linux/spinlock.h>
>  #include <linux/bug.h>
> +#include <net/sock.h>
>  
>  #define REFCOUNT_WARN(str)	WARN_ONCE(1, "refcount_t: " str ".\n")
>  
> @@ -156,6 +157,37 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
>  }
>  EXPORT_SYMBOL(refcount_dec_and_lock);
>  
> +/**
> + * refcount_dec_and_lock_sock - return holding locked sock if able to decrement
> + *				refcount to 0
> + * @r: the refcount
> + * @sock: the sock to be locked
> + *
> + * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
> + * decrement when saturated at REFCOUNT_SATURATED.
> + *
> + * Provides release memory ordering, such that prior loads and stores are done
> + * before, and provides a control dependency such that free() must come after.
> + * See the comment on top.
> + *
> + * Return: true and hold sock if able to decrement refcount to 0, false
> + *	   otherwise
> + */
> +bool refcount_dec_and_lock_sock(refcount_t *r, struct sock *sock)
> +{
> +	if (refcount_dec_not_one(r))
> +		return false;
> +
> +	bh_lock_sock(sock);
> +	if (!refcount_dec_and_test(r)) {
> +		bh_unlock_sock(sock);
> +		return false;
> +	}
> +
> +	return true;
> +}
> +EXPORT_SYMBOL(refcount_dec_and_lock_sock);

It feels a little out-of-place to me having socket-specific functions in
lib/refcount.c. I'd suggest sticking this somewhere else _or_ maybe we
could generate this pattern of code:

#define REFCOUNT_DEC_AND_LOCKNAME(lockname, locktype, lock, unlock)	\
static __always_inline							\
bool refcount_dec_and_lock_##lockname(refcount_t *r, locktype *l)	\
{									\
	...

inside a generator macro in refcount.h, like we do for seqlocks in
linux/seqlock.h. The downside of that is the cost of inlining.

Will
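
For reference, a hedged sketch of the generator Will describes above
(an illustration of the suggestion, not code from the thread): the body
reuses the open-coded pattern from the patch, parameterised on the
lock/unlock pair, so refcount.h itself never needs to know about
struct sock.

#define REFCOUNT_DEC_AND_LOCKNAME(lockname, locktype, lock, unlock)	\
static __always_inline							\
bool refcount_dec_and_lock_##lockname(refcount_t *r, locktype *l)	\
{									\
	if (refcount_dec_not_one(r))					\
		return false;						\
									\
	lock(l);							\
	if (!refcount_dec_and_test(r)) {				\
		unlock(l);						\
		return false;						\
	}								\
									\
	return true;							\
}

/* Instantiated wherever the lock type is visible, e.g. under
 * <net/sock.h>:
 */
REFCOUNT_DEC_AND_LOCKNAME(sock, struct sock, bh_lock_sock, bh_unlock_sock)

The trade-off Will points out is that each instantiation inlines the
slow path at every call site instead of sharing one out-of-line copy
in lib/refcount.c.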

Patch

diff --git a/include/linux/kref.h b/include/linux/kref.h
index d32e21a2538c292452db99b915b1bb6c3ab15e53..68b11b0c9c0fdd17cf706e0eb15f74cfe2efa178 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -90,6 +90,17 @@  static inline int kref_put_lock(struct kref *kref,
 	return 0;
 }
 
+static inline int kref_put_sock(struct kref *kref,
+				void (*release)(struct kref *kref),
+				struct sock *sock)
+{
+	if (refcount_dec_and_lock_sock(&kref->refcount, sock)) {
+		release(kref);
+		return 1;
+	}
+	return 0;
+}
+
 /**
  * kref_get_unless_zero - Increment refcount for object unless it is zero.
  * @kref: object.
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 35f039ecb2725618ca098e3515c6e19e2aece3ee..22698db1f24fdaf884a508b8a0cd44eb62194a9f 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -100,6 +100,7 @@ 
 #include <linux/spinlock_types.h>
 
 struct mutex;
+struct sock;
 
 #define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
 #define REFCOUNT_MAX		INT_MAX
@@ -358,4 +359,6 @@  extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
 						       spinlock_t *lock,
 						       unsigned long *flags) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_lock_sock(refcount_t *r,
+						    struct sock *sock);
 #endif /* _LINUX_REFCOUNT_H */
diff --git a/lib/refcount.c b/lib/refcount.c
index a207a8f22b3ca35890671e51c480266d89e4d8d6..76a728581aa49a41ef13f5141f3f2e9816d72e75 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -7,6 +7,7 @@ 
 #include <linux/refcount.h>
 #include <linux/spinlock.h>
 #include <linux/bug.h>
+#include <net/sock.h>
 
 #define REFCOUNT_WARN(str)	WARN_ONCE(1, "refcount_t: " str ".\n")
 
@@ -156,6 +157,37 @@  bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
 }
 EXPORT_SYMBOL(refcount_dec_and_lock);
 
+/**
+ * refcount_dec_and_lock_sock - return holding locked sock if able to decrement
+ *				refcount to 0
+ * @r: the refcount
+ * @sock: the sock to be locked
+ *
+ * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
+ * decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides a control dependency such that free() must come after.
+ * See the comment on top.
+ *
+ * Return: true and hold sock if able to decrement refcount to 0, false
+ *	   otherwise
+ */
+bool refcount_dec_and_lock_sock(refcount_t *r, struct sock *sock)
+{
+	if (refcount_dec_not_one(r))
+		return false;
+
+	bh_lock_sock(sock);
+	if (!refcount_dec_and_test(r)) {
+		bh_unlock_sock(sock);
+		return false;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_sock);
+
 /**
  * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
  *                                 interrupts if able to decrement refcount to 0