diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -857,6 +857,7 @@ static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
struct blkcg *blkcg = css_to_blkcg(css);
struct bfq_group_data *bfqgd = blkcg_to_bfqgd(blkcg);
struct blkcg_gq *blkg;
+ unsigned long index;
int ret = -ERANGE;
if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT)
@@ -865,7 +866,7 @@ static int bfq_io_set_weight_legacy(struct cgroup_subsys_state *css,
ret = 0;
xa_lock_irq(&blkcg->blkg_array);
bfqgd->weight = (unsigned short)val;
- hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+ xa_for_each(&blkcg->blkg_array, index, blkg) {
struct bfq_group *bfqg = blkg_to_bfqg(blkg);
if (!bfqg)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -258,7 +258,6 @@ static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
xa_lock(&blkcg->blkg_array);
__xa_store(&blkcg->blkg_array, q->id, blkg, 0);
- hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
list_add(&blkg->q_node, &q->blkg_list);
for (i = 0; i < BLKCG_MAX_POLS; i++) {
@@ -372,7 +371,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
/* Something wrong if we are trying to remove same group twice */
WARN_ON_ONCE(list_empty(&blkg->q_node));
- WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
for (i = 0; i < BLKCG_MAX_POLS; i++) {
struct blkcg_policy *pol = blkcg_policy[i];
@@ -390,7 +388,6 @@ static void blkg_destroy(struct blkcg_gq *blkg)
__xa_erase(&blkcg->blkg_array, blkg->q->id);
list_del_init(&blkg->q_node);
- hlist_del_init_rcu(&blkg->blkcg_node);
/*
* Both setting lookup hint to and clearing it from @blkg are done
@@ -435,6 +432,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
{
struct blkcg *blkcg = css_to_blkcg(css);
struct blkcg_gq *blkg;
+ unsigned long index;
int i;
mutex_lock(&blkcg_pol_mutex);
@@ -445,7 +443,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
* stat updates. This is a debug feature which shouldn't exist
* anyway. If you get hit by a race, retry.
*/
- hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+ xa_for_each(&blkcg->blkg_array, index, blkg) {
blkg_rwstat_reset(&blkg->stat_bytes);
blkg_rwstat_reset(&blkg->stat_ios);
@@ -495,16 +493,15 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
bool show_total)
{
struct blkcg_gq *blkg;
+ unsigned long index;
u64 total = 0;
- rcu_read_lock();
- hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+ xa_for_each(&blkcg->blkg_array, index, blkg) {
spin_lock_irq(&blkg->q->queue_lock);
if (blkcg_policy_enabled(blkg->q, pol))
total += prfill(sf, blkg->pd[pol->plid], data);
spin_unlock_irq(&blkg->q->queue_lock);
}
- rcu_read_unlock();
if (show_total)
seq_printf(sf, "Total %llu\n", (unsigned long long)total);
@@ -924,10 +921,10 @@ static int blkcg_print_stat(struct seq_file *sf, void *v)
{
struct blkcg *blkcg = css_to_blkcg(seq_css(sf));
struct blkcg_gq *blkg;
+ unsigned long index;
rcu_read_lock();
-
- hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
+ xa_for_each(&blkcg->blkg_array, index, blkg) {
const char *dname;
char *buf;
struct blkg_rwstat rwstat;
@@ -1076,24 +1073,18 @@ static void blkcg_css_offline(struct cgroup_subsys_state *css)
*/
void blkcg_destroy_blkgs(struct blkcg *blkcg)
{
- xa_lock_irq(&blkcg->blkg_array);
+ struct blkcg_gq *blkg;
+ unsigned long index;
- while (!hlist_empty(&blkcg->blkg_list)) {
- struct blkcg_gq *blkg = hlist_entry(blkcg->blkg_list.first,
- struct blkcg_gq, blkcg_node);
+ xa_for_each(&blkcg->blkg_array, index, blkg) {
struct request_queue *q = blkg->q;
- if (spin_trylock(&q->queue_lock)) {
- blkg_destroy(blkg);
- spin_unlock(&q->queue_lock);
- } else {
- xa_unlock_irq(&blkcg->blkg_array);
- cpu_relax();
- xa_lock_irq(&blkcg->blkg_array);
- }
+ spin_lock_irq(&q->queue_lock);
+ xa_lock(&blkcg->blkg_array);
+ blkg_destroy(blkg);
+ xa_unlock(&blkcg->blkg_array);
+ spin_unlock_irq(&q->queue_lock);
}
-
- xa_unlock_irq(&blkcg->blkg_array);
}
static void blkcg_css_free(struct cgroup_subsys_state *css)
@@ -1159,7 +1150,6 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
}
xa_init_flags(&blkcg->blkg_array, XA_FLAGS_LOCK_IRQ);
- INIT_HLIST_HEAD(&blkcg->blkg_list);
#ifdef CONFIG_CGROUP_WRITEBACK
INIT_LIST_HEAD(&blkcg->cgwb_list);
refcount_set(&blkcg->cgwb_refcnt, 1);
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -49,7 +49,6 @@ struct blkcg {
struct xarray blkg_array;
struct blkcg_gq __rcu *blkg_hint;
- struct hlist_head blkg_list;
struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
@@ -110,7 +109,6 @@ struct blkcg_gq {
/* Pointer to the associated request_queue */
struct request_queue *q;
struct list_head q_node;
- struct hlist_node blkcg_node;
struct blkcg *blkcg;
/*
We can iterate over all of a blkcg's blkgs using the XArray iterator
instead of maintaining a separate hlist. This removes a nasty locking
inversion in blkcg_destroy_blkgs().

Signed-off-by: Matthew Wilcox <willy@infradead.org>
---
 block/bfq-cgroup.c         |  3 ++-
 block/blk-cgroup.c         | 38 ++++++++++++++------------------------
 include/linux/blk-cgroup.h |  2 --
 3 files changed, 16 insertions(+), 27 deletions(-)
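For readers who have not used the XArray API before, here is a minimal
module sketch of the xa_for_each() pattern this patch adopts. It is
purely illustrative and not part of the patch: the file name, struct
and values are invented, but the calls (DEFINE_XARRAY, xa_store,
xa_for_each, xa_erase, xa_destroy) are the stock <linux/xarray.h>
interface. The patch itself keys each blkg by its queue's q->id; the
sketch just uses a loop counter.

/* xa_demo.c: hypothetical sketch, not part of this patch. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/xarray.h>

struct demo_entry {
	int payload;
};

static DEFINE_XARRAY(demo_array);

static int __init xa_demo_init(void)
{
	struct demo_entry *entry;
	unsigned long index;
	int i;

	/* Store a few entries under arbitrary indices, much as the
	 * patch stores each blkg under its queue's q->id. */
	for (i = 0; i < 4; i++) {
		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
		if (!entry)
			return -ENOMEM;
		entry->payload = i * 10;
		if (xa_is_err(xa_store(&demo_array, i, entry, GFP_KERNEL))) {
			kfree(entry);
			return -ENOMEM;
		}
	}

	/* xa_for_each() visits every present entry, setting @index to
	 * each occupied slot in turn; no hlist head or node is needed,
	 * which is what lets the patch delete blkcg_node and blkg_list. */
	xa_for_each(&demo_array, index, entry)
		pr_info("index %lu -> payload %d\n", index, entry->payload);

	return 0;
}

static void __exit xa_demo_exit(void)
{
	struct demo_entry *entry;
	unsigned long index;

	/* Erasing the current entry while iterating is safe. */
	xa_for_each(&demo_array, index, entry) {
		xa_erase(&demo_array, index);
		kfree(entry);
	}
	xa_destroy(&demo_array);
}

module_init(xa_demo_init);
module_exit(xa_demo_exit);
MODULE_LICENSE("GPL");

Because xa_for_each() looks up the next present entry afresh on every
iteration (taking the RCU read lock internally), entries may be erased
during the walk and any external lock may be dropped and retaken
between iterations; the new blkcg_destroy_blkgs() loop depends on
exactly that behaviour.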
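The locking inversion mentioned above can be read straight off the
hunks. A condensed restatement, using the identifiers from the patch
(and assuming, as in mainline, that blkg_create() runs with
q->queue_lock already held by its caller):

/*
 * Nesting used by blkg_create(): q->queue_lock, then the blkg_array
 * lock. The old blkcg_destroy_blkgs() inverted that order, so it could
 * only trylock the queue lock and had to back off with cpu_relax()
 * whenever it lost the race:
 *
 *	xa_lock_irq(&blkcg->blkg_array);
 *	if (spin_trylock(&q->queue_lock)) { ... } else { retry }
 *
 * The new loop takes the locks in the same order as blkg_create(), so
 * unconditional locking is deadlock-free:
 *
 *	spin_lock_irq(&q->queue_lock);
 *	xa_lock(&blkcg->blkg_array);
 *	blkg_destroy(blkg);
 *	xa_unlock(&blkcg->blkg_array);
 *	spin_unlock_irq(&q->queue_lock);
 */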