@@ -1200,7 +1200,7 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
if (!seq_css(sf)->parent)
blkcg_fill_root_iostats();
else
- cgroup_rstat_flush(blkcg->css.cgroup);
+ cgroup_rstat_flush(&blkcg->css);
rcu_read_lock();
hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
@@ -2183,7 +2183,7 @@ void blk_cgroup_bio_start(struct bio *bio)
}
u64_stats_update_end_irqrestore(&bis->sync, flags);
- cgroup_rstat_updated(blkcg->css.cgroup, cpu);
+ cgroup_rstat_updated(&blkcg->css, cpu);
put_cpu();
}
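
The blk-cgroup hunks above show the call-site pattern this series establishes: rather than each caller digging the cgroup out of its subsystem state (blkcg->css.cgroup), the caller hands &blkcg->css to the rstat core and the dereference happens once, inside the callee. Below is a minimal userspace sketch of that inversion; the struct layout mirrors the kernel's back-pointer arrangement, but all names here are illustrative, not taken from the tree.

	/*
	 * Userspace model of the API change; not kernel code. A css carries
	 * a back-pointer to its cgroup, so passing the css is enough for
	 * the callee to recover the cgroup on its own.
	 */
	#include <stdio.h>

	struct cgroup;

	struct cgroup_subsys_state {
		struct cgroup *cgroup;	/* back-pointer, as in the kernel */
	};

	struct cgroup {
		struct cgroup_subsys_state self;	/* the cgroup's own css */
		const char *name;
	};

	/* new-style entry point: the cgroup is resolved inside the callee */
	static void rstat_updated(struct cgroup_subsys_state *css, int cpu)
	{
		struct cgroup *cgrp = css->cgroup;

		printf("mark %s updated on cpu %d\n", cgrp->name, cpu);
	}

	int main(void)
	{
		struct cgroup cg = { .name = "demo" };

		cg.self.cgroup = &cg;		/* self css points back at its cgroup */
		rstat_updated(&cg.self, 0);	/* caller passes the css, not the cgroup */
		return 0;
	}
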
@@ -687,10 +687,10 @@ static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
/*
* cgroup scalable recursive statistics.
*/
-void cgroup_rstat_updated(struct cgroup *cgrp, int cpu);
-void cgroup_rstat_flush(struct cgroup *cgrp);
-void cgroup_rstat_flush_hold(struct cgroup *cgrp);
-void cgroup_rstat_flush_release(struct cgroup *cgrp);
+void cgroup_rstat_updated(struct cgroup_subsys_state *css, int cpu);
+void cgroup_rstat_flush(struct cgroup_subsys_state *css);
+void cgroup_rstat_flush_hold(struct cgroup_subsys_state *css);
+void cgroup_rstat_flush_release(struct cgroup_subsys_state *css);
/*
* Basic resource stats.
@@ -5465,7 +5465,7 @@ static void css_release_work_fn(struct work_struct *work)
/* css release path */
if (!list_empty(&css->rstat_css_node)) {
- cgroup_rstat_flush(cgrp);
+ cgroup_rstat_flush(css);
list_del_rcu(&css->rstat_css_node);
}
@@ -5493,7 +5493,7 @@ static void css_release_work_fn(struct work_struct *work)
/* cgroup release path */
TRACE_CGROUP_PATH(release, cgrp);
- cgroup_rstat_flush(cgrp);
+ cgroup_rstat_flush(css);
spin_lock_irq(&css_set_lock);
for (tcgrp = cgroup_parent(cgrp); tcgrp;
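
Both release paths in css_release_work_fn() can pass css directly because the work item is embedded in the css itself, so the function already holds the css before either branch runs (in the cgroup release path that css is the cgroup's own self). A self-contained sketch of that embedded-work pattern follows; it is an assumption-based simplification of how the release work recovers its css, with illustrative types, not the kernel's.

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct work_struct {
		void (*func)(struct work_struct *work);
	};

	struct css {
		const char *name;
		struct work_struct destroy_work;	/* embedded, as in the kernel */
	};

	static void release_work_fn(struct work_struct *work)
	{
		/* step from the embedded member back to its container */
		struct css *css = container_of(work, struct css, destroy_work);

		printf("releasing css of %s\n", css->name);
	}

	int main(void)
	{
		struct css c = { .name = "demo", .destroy_work = { release_work_fn } };

		c.destroy_work.func(&c.destroy_work);
		return 0;
	}
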
@@ -82,8 +82,9 @@ void _cgroup_rstat_cpu_unlock(raw_spinlock_t *cpu_lock, int cpu,
* rstat_cpu->updated_children list. See the comment on top of
* cgroup_rstat_cpu definition for details.
*/
-__bpf_kfunc void cgroup_rstat_updated(struct cgroup *cgrp, int cpu)
+__bpf_kfunc void cgroup_rstat_updated(struct cgroup_subsys_state *css, int cpu)
{
+ struct cgroup *cgrp = css->cgroup;
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock, cpu);
unsigned long flags;
@@ -346,8 +347,10 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
*
* This function may block.
*/
-__bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
+__bpf_kfunc void cgroup_rstat_flush(struct cgroup_subsys_state *css)
{
+ struct cgroup *cgrp = css->cgroup;
+
might_sleep();
__cgroup_rstat_lock(cgrp, -1);
@@ -364,9 +367,11 @@ __bpf_kfunc void cgroup_rstat_flush(struct cgroup *cgrp)
*
* This function may block.
*/
-void cgroup_rstat_flush_hold(struct cgroup *cgrp)
+void cgroup_rstat_flush_hold(struct cgroup_subsys_state *css)
__acquires(&cgroup_rstat_lock)
{
+ struct cgroup *cgrp = css->cgroup;
+
might_sleep();
__cgroup_rstat_lock(cgrp, -1);
cgroup_rstat_flush_locked(cgrp);
@@ -376,9 +381,11 @@ void cgroup_rstat_flush_hold(struct cgroup *cgrp)
* cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
- * @cgrp: cgroup used by tracepoint
+ * @css: css that was previously used for the call to flush hold
*/
-void cgroup_rstat_flush_release(struct cgroup *cgrp)
+void cgroup_rstat_flush_release(struct cgroup_subsys_state *css)
__releases(&cgroup_rstat_lock)
{
+ struct cgroup *cgrp = css->cgroup;
+
__cgroup_rstat_unlock(cgrp, -1);
}
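
The hold/release pair gives readers a consistent snapshot: cgroup_rstat_flush_hold() flushes and returns with cgroup_rstat_lock still held, the caller reads whatever it needs, and cgroup_rstat_flush_release() drops the lock (the cputime_show hunk below is exactly this shape). Here is a userspace model of the pairing, with a pthread mutex standing in for the rstat lock; the names and the fake "flush" are illustrative only.

	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t rstat_lock = PTHREAD_MUTEX_INITIALIZER;
	static long long pending = 42;		/* unflushed per-cpu delta (fake) */
	static long long sum_exec_runtime;	/* stands in for cgrp->bstat */

	static void flush_hold(void)
	{
		pthread_mutex_lock(&rstat_lock);
		sum_exec_runtime += pending;	/* "flush" the pending deltas */
		pending = 0;
	}

	static void flush_release(void)
	{
		pthread_mutex_unlock(&rstat_lock);
	}

	int main(void)
	{
		flush_hold();			/* flush, then keep holding the lock */
		printf("usage=%lld\n", sum_exec_runtime);	/* stable snapshot */
		flush_release();
		return 0;
	}
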
@@ -408,7 +415,7 @@ void cgroup_rstat_exit(struct cgroup *cgrp)
{
int cpu;
- cgroup_rstat_flush(cgrp);
+ cgroup_rstat_flush(&cgrp->self);
/* sanity check */
for_each_possible_cpu(cpu) {
@@ -512,8 +519,10 @@ static void cgroup_base_stat_cputime_account_end(struct cgroup *cgrp,
struct cgroup_rstat_cpu *rstatc,
unsigned long flags)
{
+ struct cgroup_subsys_state *css = &cgrp->self;
+
u64_stats_update_end_irqrestore(&rstatc->bsync, flags);
- cgroup_rstat_updated(cgrp, smp_processor_id());
+ cgroup_rstat_updated(css, smp_processor_id());
put_cpu_ptr(rstatc);
}
@@ -612,16 +621,17 @@ static void cgroup_force_idle_show(struct seq_file *seq, struct cgroup_base_stat
void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
- struct cgroup *cgrp = seq_css(seq)->cgroup;
+ struct cgroup_subsys_state *css = seq_css(seq);
+ struct cgroup *cgrp = css->cgroup;
u64 usage, utime, stime, ntime;
if (cgroup_parent(cgrp)) {
- cgroup_rstat_flush_hold(cgrp);
+ cgroup_rstat_flush_hold(css);
usage = cgrp->bstat.cputime.sum_exec_runtime;
cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
&utime, &stime);
ntime = cgrp->bstat.ntime;
- cgroup_rstat_flush_release(cgrp);
+ cgroup_rstat_flush_release(css);
} else {
/* cgrp->bstat of root is not actually used, reuse it */
root_cgroup_cputime(&cgrp->bstat);
@@ -579,7 +579,7 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val)
if (!val)
return;
- cgroup_rstat_updated(memcg->css.cgroup, cpu);
+ cgroup_rstat_updated(&memcg->css, cpu);
statc = this_cpu_ptr(memcg->vmstats_percpu);
for (; statc; statc = statc->parent) {
stats_updates = READ_ONCE(statc->stats_updates) + abs(val);
@@ -611,7 +611,7 @@ static void __mem_cgroup_flush_stats(struct mem_cgroup *memcg, bool force)
if (mem_cgroup_is_root(memcg))
WRITE_ONCE(flush_last_time, jiffies_64);
- cgroup_rstat_flush(memcg->css.cgroup);
+ cgroup_rstat_flush(&memcg->css);
}
/*
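
Besides switching to the css argument, the memcg_rstat_updated() hunk shows its update-batching loop: every level of the vmstats_percpu parent chain accumulates the magnitude of updates so later flushes can tell how much is pending per subtree. A simplified userspace sketch of that walk follows; the kernel additionally uses READ_ONCE/WRITE_ONCE and a threshold check not visible in this hunk, and the types and names here are illustrative.

	#include <stdio.h>
	#include <stdlib.h>

	struct vmstats_percpu {
		struct vmstats_percpu *parent;
		long stats_updates;	/* pending update magnitude */
	};

	static void memcg_stats_updated(struct vmstats_percpu *statc, int val)
	{
		if (!val)
			return;
		/* propagate the magnitude up the hierarchy, as in the hunk above */
		for (; statc; statc = statc->parent)
			statc->stats_updates += abs(val);
	}

	int main(void)
	{
		struct vmstats_percpu root = { 0 };
		struct vmstats_percpu child = { .parent = &root };

		memcg_stats_updated(&child, -5);
		printf("child=%ld root=%ld\n",
		       child.stats_updates, root.stats_updates);	/* 5 and 5 */
		return 0;
	}
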
Change API calls to accept a cgroup_subsys_state instead of a cgroup.

Signed-off-by: JP Kobryn <inwardvessel@gmail.com>
---
 block/blk-cgroup.c     |  4 ++--
 include/linux/cgroup.h |  8 ++++----
 kernel/cgroup/cgroup.c |  4 ++--
 kernel/cgroup/rstat.c  | 28 +++++++++++++++++++---------
 mm/memcontrol.c        |  4 ++--
 5 files changed, 29 insertions(+), 19 deletions(-)