diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -131,7 +131,8 @@ static inline bool preempt_tag(struct blk_mq_alloc_data *data,
struct sbitmap_queue *bt)
{
return data->preemption ||
- atomic_read(&bt->ws_active) <= SBQ_WAIT_QUEUES;
+ atomic_read(&bt->ws_active) <= SBQ_WAIT_QUEUES ||
+ bt->force_tag_preemption;
}
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -143,6 +143,8 @@ struct sbitmap_queue {
* sbitmap_queue_get_shallow()
*/
unsigned int min_shallow_depth;
+
+ bool force_tag_preemption;
};
/**
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -434,6 +434,7 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
sbq->wake_batch = sbq_calc_wake_batch(sbq, depth);
atomic_set(&sbq->wake_index, 0);
atomic_set(&sbq->ws_active, 0);
+ sbq->force_tag_preemption = true;
sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
if (!sbq->ws) {
@@ -604,6 +605,15 @@ static void sbq_update_wake_index(struct sbitmap_queue *sbq,
atomic_cmpxchg(&sbq->wake_index, old_wake_index, index);
}
+static inline void sbq_update_preemption(struct sbitmap_queue *sbq,
+ unsigned int wake_batch)
+{
+ bool force = (sbq->sb.depth - sbitmap_weight(&sbq->sb)) >=
+ wake_batch << 1;
+
+ WRITE_ONCE(sbq->force_tag_preemption, force);
+}
+
static bool __sbq_wake_up(struct sbitmap_queue *sbq)
{
struct sbq_wait_state *ws;
@@ -637,6 +647,7 @@ static bool __sbq_wake_up(struct sbitmap_queue *sbq)
*/
smp_mb__before_atomic();
atomic_set(&ws->wait_cnt, wake_batch);
+ sbq_update_preemption(sbq, wake_batch);
wake_up_nr(&ws->wait, wake_batch);
return true;
Now that tag preemption is disabled, if wakers don't use up 'wake_batch' tags while preemption is still disabled, io concurrency will decline. To fix the problem, add a check before waking up waiters, and force tag preemption if free tags are sufficient, so that the extra tags can be used by new io.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
---
 block/blk-mq-tag.c      |  3 ++-
 include/linux/sbitmap.h |  2 ++
 lib/sbitmap.c           | 11 +++++++++++
 3 files changed, 15 insertions(+), 1 deletion(-)
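
For readers skimming the patch, below is a rough userspace sketch of the heuristic (an illustration, not the kernel code): the hypothetical helpers should_force_preemption() and may_preempt() mirror sbq_update_preemption() and preempt_tag() from the hunks above, with the depth, busy-tag count, wake_batch and active-waitqueue count passed in directly instead of being read from a struct sbitmap_queue.

#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace model of the patch's heuristic: let new requests grab
 * ("preempt") free tags directly, bypassing the wait queues, whenever
 * at least two full wake batches of tags are free. Names here are
 * illustrative only, not the kernel API.
 */
static bool should_force_preemption(unsigned int depth,
				    unsigned int busy_tags,
				    unsigned int wake_batch)
{
	unsigned int free_tags = depth - busy_tags;

	/* Mirrors: (sbq->sb.depth - sbitmap_weight(&sbq->sb)) >= wake_batch << 1 */
	return free_tags >= (wake_batch << 1);
}

/*
 * Simplified stand-in for preempt_tag(): preemption wins if it is
 * explicitly requested, few wait queues are active, or it is forced
 * by the free-tag check above.
 */
static bool may_preempt(bool requested_preemption, int ws_active,
			int max_wait_queues, bool force_preemption)
{
	return requested_preemption ||
	       ws_active <= max_wait_queues ||
	       force_preemption;
}

int main(void)
{
	/* depth 64, wake_batch 8: forcing kicks in once >= 16 tags are free */
	printf("48 busy: force=%d\n", should_force_preemption(64, 48, 8)); /* 16 free -> 1 */
	printf("56 busy: force=%d\n", should_force_preemption(64, 56, 8)); /*  8 free -> 0 */
	printf("may_preempt=%d\n",
	       may_preempt(false, 16, 8, should_force_preemption(64, 48, 8)));
	return 0;
}

With a depth of 64 and a wake_batch of 8, forcing kicks in while at least 16 tags are free, which matches the patch's stated goal of letting new io consume tags that woken waiters have not yet claimed.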