@@ -77,12 +77,6 @@ enum {
* percpu_ref_switch_to_percpu() is invoked on it.
*/
PERCPU_REF_INIT_ATOMIC = 1 << 0,
-
- /*
- * Start dead w/ ref == 0 in atomic mode. Must be revived with
- * percpu_ref_reinit() before used. Implies INIT_ATOMIC.
- */
- PERCPU_REF_INIT_DEAD = 1 << 1,
};
struct percpu_ref {
@@ -109,7 +103,6 @@ void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
void percpu_ref_resurrect(struct percpu_ref *ref);
-void percpu_ref_reinit(struct percpu_ref *ref);
/**
* percpu_ref_kill - drop the initial ref
@@ -70,15 +70,12 @@ int percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release,
ref->force_atomic = flags & PERCPU_REF_INIT_ATOMIC;
- if (flags & (PERCPU_REF_INIT_ATOMIC | PERCPU_REF_INIT_DEAD))
+ if (flags & PERCPU_REF_INIT_ATOMIC)
ref->percpu_count_ptr |= __PERCPU_REF_ATOMIC;
else
start_count += PERCPU_COUNT_BIAS;
- if (flags & PERCPU_REF_INIT_DEAD)
- ref->percpu_count_ptr |= __PERCPU_REF_DEAD;
- else
- start_count++;
+ start_count++;
atomic_long_set(&ref->count, start_count);
@@ -282,14 +279,11 @@ EXPORT_SYMBOL_GPL(percpu_ref_switch_to_atomic_sync);
* @ref: percpu_ref to switch to percpu mode
*
* There's no reason to use this function for the usual reference counting.
- * To re-use an expired ref, use percpu_ref_reinit().
*
* Switch @ref to percpu mode. This function may be invoked concurrently
* with all the get/put operations and can safely be mixed with kill and
* reinit operations. This function reverses the sticky atomic state set
- * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic(). If @ref is
- * dying or dead, the actual switching takes place on the following
- * percpu_ref_reinit().
+ * by PERCPU_REF_INIT_ATOMIC or percpu_ref_switch_to_atomic().
*
* This function may block if @ref is in the process of switching to atomic
* mode. If the caller ensures that @ref is not in the process of
@@ -343,25 +337,6 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
}
EXPORT_SYMBOL_GPL(percpu_ref_kill_and_confirm);
-/**
- * percpu_ref_reinit - re-initialize a percpu refcount
- * @ref: perpcu_ref to re-initialize
- *
- * Re-initialize @ref so that it's in the same state as when it finished
- * percpu_ref_init() ignoring %PERCPU_REF_INIT_DEAD. @ref must have been
- * initialized successfully and reached 0 but not exited.
- *
- * Note that percpu_ref_tryget[_live]() are safe to perform on @ref while
- * this function is in progress.
- */
-void percpu_ref_reinit(struct percpu_ref *ref)
-{
- WARN_ON_ONCE(!percpu_ref_is_zero(ref));
-
- percpu_ref_resurrect(ref);
-}
-EXPORT_SYMBOL_GPL(percpu_ref_reinit);
-
/**
* percpu_ref_resurrect - modify a percpu refcount from dead to live
* @ref: perpcu_ref to resurrect
Kill off PERCPU_REF_INIT_DEAD and percpu_ref_reinit() for lack of users.
They were being used in cgroups, but this was causing problems with the
refcounting in the event that kernfs_get_tree() created a superblock and
then failed with the refcount not fully initialised.

Signed-off-by: David Howells <dhowells@redhat.com>
---
 include/linux/percpu-refcount.h |  7 -------
 lib/percpu-refcount.c           | 31 +++----------------------------
 2 files changed, 3 insertions(+), 35 deletions(-)