@@ -150,9 +150,15 @@ static void hlist_add_ucounts(struct ucounts *ucounts)
 	spin_unlock_irq(&ucounts_lock);
 }
 
+static inline bool get_ucounts_or_wrap(struct ucounts *ucounts)
+{
+	/* Returns true on a successful get, false if the count wraps. */
+	return !atomic_add_negative(1, &ucounts->count);
+}
+
 struct ucounts *get_ucounts(struct ucounts *ucounts)
 {
-	if (atomic_add_negative(1, &ucounts->count)) {
+	if (!get_ucounts_or_wrap(ucounts)) {
 		put_ucounts(ucounts);
 		ucounts = NULL;
 	}
@@ -163,7 +169,7 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 {
 	struct hlist_head *hashent = ucounts_hashentry(ns, uid);
 	struct ucounts *ucounts, *new;
-	long overflow;
+	bool wrapped;
 
 	spin_lock_irq(&ucounts_lock);
 	ucounts = find_ucounts(ns, uid, hashent);
@@ -188,9 +194,9 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
 			return new;
 		}
 	}
-	overflow = atomic_add_negative(1, &ucounts->count);
+	wrapped = !get_ucounts_or_wrap(ucounts);
 	spin_unlock_irq(&ucounts_lock);
-	if (overflow) {
+	if (wrapped) {
 		put_ucounts(ucounts);
 		return NULL;
 	}

Add a helper function get_ucounts_or_wrap that is a trivial wrapper
around atomic_add_negative, which makes it clear how
atomic_add_negative is used in the context of ucounts.

Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
---
 kernel/ucount.c | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)
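
(Not part of the patch itself, for reviewers who want to see the wrap
semantics in isolation: below is a minimal userspace sketch in plain
C11. The names struct counter and get_or_wrap are made up for
illustration, and atomic_fetch_add() from <stdatomic.h> stands in for
the kernel's atomic_add_negative(), which atomically reports whether
the new value is negative.)

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for struct ucounts; only the refcount matters. */
struct counter {
	atomic_int count;
};

/*
 * Userspace mirror of get_ucounts_or_wrap(): true on a successful get,
 * false if the increment drives the count negative. C11's
 * atomic_fetch_add() returns the old value, so the "new value is
 * negative" test that atomic_add_negative() performs is reconstructed
 * from it: old + 1 is negative exactly when old == INT_MAX
 * (two's-complement wrap, which C11 defines for atomic signed
 * arithmetic) or old < -1.
 */
static bool get_or_wrap(struct counter *c)
{
	int old = atomic_fetch_add(&c->count, 1);

	return old != INT_MAX && old >= -1;
}

int main(void)
{
	struct counter c;

	atomic_init(&c.count, 1);
	printf("normal get ok:  %d\n", get_or_wrap(&c));	/* prints 1 */

	atomic_init(&c.count, INT_MAX);
	printf("wrapped get ok: %d\n", get_or_wrap(&c));	/* prints 0 */
	return 0;
}

Note that on the failed-get path the kernel code above still calls
put_ucounts() to back the increment out; the sketch leaves the wrapped
value in place, since it only demonstrates the detection.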