@@ -277,7 +277,7 @@ __weak noinline void bpf_rstat_flush(struct cgroup *cgrp,
 __bpf_hook_end();
 
 /*
- * Helper functions for locking cgroup_rstat_lock.
+ * Helper functions for locking.
  *
  * This makes it easier to diagnose locking issues and contention in
  * production environments. The parameter @cpu_in_loop indicate lock
@@ -285,29 +285,32 @@ __bpf_hook_end();
  * value -1 is used when obtaining the main lock else this is the CPU
  * number processed last.
  */
 
-static inline void __cgroup_rstat_lock(struct cgroup *cgrp, int cpu_in_loop)
-	__acquires(&cgroup_rstat_lock)
+static inline void __cgroup_rstat_lock(spinlock_t *lock,
+				       struct cgroup *cgrp, int cpu_in_loop)
+	__acquires(lock)
 {
 	bool contended;
 
-	contended = !spin_trylock_irq(&cgroup_rstat_lock);
+	contended = !spin_trylock_irq(lock);
 	if (contended) {
 		trace_cgroup_rstat_lock_contended(cgrp, cpu_in_loop, contended);
-		spin_lock_irq(&cgroup_rstat_lock);
+		spin_lock_irq(lock);
 	}
 	trace_cgroup_rstat_locked(cgrp, cpu_in_loop, contended);
 }
 
-static inline void __cgroup_rstat_unlock(struct cgroup *cgrp, int cpu_in_loop)
-	__releases(&cgroup_rstat_lock)
+static inline void __cgroup_rstat_unlock(spinlock_t *lock,
+					 struct cgroup *cgrp, int cpu_in_loop)
+	__releases(lock)
 {
 	trace_cgroup_rstat_unlock(cgrp, cpu_in_loop, false);
-	spin_unlock_irq(&cgroup_rstat_lock);
+	spin_unlock_irq(lock);
 }
 
 /* see cgroup_rstat_flush() */
-static void cgroup_rstat_flush_locked(struct cgroup_subsys_state *css)
-	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
+static void cgroup_rstat_flush_locked(struct cgroup_subsys_state *css,
+				      spinlock_t *lock)
+	__releases(lock) __acquires(lock)
 {
 	struct cgroup *cgrp = css->cgroup;
 	int cpu;
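
With the lock passed in explicitly, these helpers are no longer tied to the
global cgroup_rstat_lock. As a minimal sketch of what that enables, a caller
could pair them with its own lock; the names ss_rstat_lock and
ss_rstat_flush() below are hypothetical illustrations, not part of this patch:

	/* Illustration only: a hypothetical per-subsystem rstat lock.
	 * This patch itself still passes &cgroup_rstat_lock at every
	 * call site (see the hunks below).
	 */
	static DEFINE_SPINLOCK(ss_rstat_lock);

	static void ss_rstat_flush(struct cgroup_subsys_state *css)
	{
		/* cpu_in_loop == -1: taking the main lock, not in the loop */
		__cgroup_rstat_lock(&ss_rstat_lock, css->cgroup, -1);
		cgroup_rstat_flush_locked(css, &ss_rstat_lock);
		__cgroup_rstat_unlock(&ss_rstat_lock, css->cgroup, -1);
	}
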
@@ -328,11 +331,11 @@ static void cgroup_rstat_flush_locked(struct cgroup_subsys_state *css)
 		}
 
 		/* play nice and yield if necessary */
-		if (need_resched() || spin_needbreak(&cgroup_rstat_lock)) {
-			__cgroup_rstat_unlock(cgrp, cpu);
+		if (need_resched() || spin_needbreak(lock)) {
+			__cgroup_rstat_unlock(lock, cgrp, cpu);
 			if (!cond_resched())
 				cpu_relax();
-			__cgroup_rstat_lock(cgrp, cpu);
+			__cgroup_rstat_lock(lock, cgrp, cpu);
 		}
 	}
 }
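
For reference, spin_needbreak() reports whether another task is waiting on
the lock, so the holder can voluntarily break the critical section. Roughly,
paraphrasing its shape in include/linux/sched.h (a sketch for orientation,
not part of this patch):

	/* Rough paraphrase of the kernel's spin_needbreak() */
	static inline int spin_needbreak(spinlock_t *lock)
	{
	#ifdef CONFIG_PREEMPTION
		return spin_is_contended(lock);
	#else
		return 0;
	#endif
	}

When cond_resched() does not actually reschedule, the cpu_relax() fallback
above still gives the waiter observed by spin_needbreak() a moment to take
the just-released lock before it is re-acquired.
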
@@ -356,9 +359,9 @@ __bpf_kfunc void cgroup_rstat_flush(struct cgroup_subsys_state *css)
 	struct cgroup *cgrp = css->cgroup;
 
 	might_sleep();
-	__cgroup_rstat_lock(cgrp, -1);
-	cgroup_rstat_flush_locked(css);
-	__cgroup_rstat_unlock(cgrp, -1);
+	__cgroup_rstat_lock(&cgroup_rstat_lock, cgrp, -1);
+	cgroup_rstat_flush_locked(css, &cgroup_rstat_lock);
+	__cgroup_rstat_unlock(&cgroup_rstat_lock, cgrp, -1);
 }
 
 /**
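
cgroup_rstat_flush() above is the full, sleepable flush entry point. A
minimal caller sketch, flushing before reading an up-to-date total; the
my_stats() accessor and its total field are invented for illustration:

	/* Illustration only: my_stats() is hypothetical */
	static u64 my_read_total(struct cgroup_subsys_state *css)
	{
		cgroup_rstat_flush(css);	/* may sleep */
		return my_stats(css)->total;
	}
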
@@ -375,8 +378,8 @@ void cgroup_rstat_flush_hold(struct cgroup_subsys_state *css)
 	struct cgroup *cgrp = css->cgroup;
 
 	might_sleep();
-	__cgroup_rstat_lock(cgrp, -1);
-	cgroup_rstat_flush_locked(css);
+	__cgroup_rstat_lock(&cgroup_rstat_lock, cgrp, -1);
+	cgroup_rstat_flush_locked(css, &cgroup_rstat_lock);
 }
 
 /**
@@ -386,7 +389,7 @@ void cgroup_rstat_flush_hold(struct cgroup_subsys_state *css)
 void cgroup_rstat_flush_release(struct cgroup_subsys_state *css)
 {
 	struct cgroup *cgrp = css->cgroup;
-	__cgroup_rstat_unlock(cgrp, -1);
+	__cgroup_rstat_unlock(&cgroup_rstat_lock, cgrp, -1);
 }
 
 int cgroup_rstat_init(struct cgroup_subsys_state *css)
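
cgroup_rstat_flush_hold() and cgroup_rstat_flush_release() bracket reads
that should come from a single flush: hold returns with cgroup_rstat_lock
still held and release drops it. A usage sketch, again with the invented
my_stats() accessor:

	/* Illustration only: my_stats() is hypothetical */
	static void my_read_pair(struct cgroup_subsys_state *css,
				 u64 *a, u64 *b)
	{
		cgroup_rstat_flush_hold(css);	/* flush, keep lock held */
		*a = my_stats(css)->a;
		*b = my_stats(css)->b;
		cgroup_rstat_flush_release(css);
	}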