===================================================================
@@ -342,35 +342,34 @@ static void bio_alloc_rescue(struct work
}
}
-static void punt_bios_to_rescuer(struct bio_set *bs)
+/**
+ * blk_flush_bio_list - punt queued bios to their rescue workqueues
+ *
+ * Pop the bios queued on current->bio_list and hand each one off to
+ * the rescue workqueue of the bio_set it was allocated from.
+ *
+ * A bio without a bio_set is left on current->bio_list; stacking
+ * drivers are expected to allocate from a bio_set, so this should
+ * not be an issue.
+ */
+void blk_flush_bio_list(void)
{
- struct bio_list punt, nopunt;
struct bio *bio;
+ struct bio_list list = *current->bio_list;
+
+ bio_list_init(current->bio_list);
+
- /*
- * In order to guarantee forward progress we must punt only bios that
- * were allocated from this bio_set; otherwise, if there was a bio on
- * there for a stacking driver higher up in the stack, processing it
- * could require allocating bios from this bio_set, and doing that from
- * our own rescuer would be bad.
- *
- * Since bio lists are singly linked, pop them all instead of trying to
- * remove from the middle of the list:
- */
-
- bio_list_init(&punt);
- bio_list_init(&nopunt);
-
- while ((bio = bio_list_pop(current->bio_list)))
- bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
-
- *current->bio_list = nopunt;
-
- spin_lock(&bs->rescue_lock);
- bio_list_merge(&bs->rescue_list, &punt);
- spin_unlock(&bs->rescue_lock);
+ while ((bio = bio_list_pop(&list))) {
+ struct bio_set *bs = bio->bi_pool;
+
+ if (unlikely(!bs)) {
+ bio_list_add(current->bio_list, bio);
+ } else {
+ spin_lock(&bs->rescue_lock);
+ bio_list_add(&bs->rescue_list, bio);
+ spin_unlock(&bs->rescue_lock);
- queue_work(bs->rescue_workqueue, &bs->rescue_work);
+ queue_work(bs->rescue_workqueue, &bs->rescue_work);
+ }
+ }
}
/**
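
For reference, the consumer side of this handoff is the rescue worker that
queue_work() kicks in the hunk above; a rough sketch of how
bio_alloc_rescue() reads in this era of fs/bio.c (unchanged by this patch):

    static void bio_alloc_rescue(struct work_struct *work)
    {
            struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
            struct bio *bio;

            while (1) {
                    /* Detach one punted bio at a time under the rescue lock. */
                    spin_lock(&bs->rescue_lock);
                    bio = bio_list_pop(&bs->rescue_list);
                    spin_unlock(&bs->rescue_lock);

                    if (!bio)
                            break;

                    /* Resubmit from the rescuer's own context, where nothing
                     * is parked on current->bio_list. */
                    generic_make_request(bio);
            }
    }
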
@@ -410,7 +409,6 @@ static void punt_bios_to_rescuer(struct
*/
struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
{
- gfp_t saved_gfp = gfp_mask;
unsigned front_pad;
unsigned inline_vecs;
unsigned long idx = BIO_POOL_NONE;
@@ -428,36 +426,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_m
front_pad = 0;
inline_vecs = nr_iovecs;
} else {
- /*
- * generic_make_request() converts recursion to iteration; this
- * means if we're running beneath it, any bios we allocate and
- * submit will not be submitted (and thus freed) until after we
- * return.
- *
- * This exposes us to a potential deadlock if we allocate
- * multiple bios from the same bio_set() while running
- * underneath generic_make_request(). If we were to allocate
- * multiple bios (say a stacking block driver that was splitting
- * bios), we would deadlock if we exhausted the mempool's
- * reserve.
- *
- * We solve this, and guarantee forward progress, with a rescuer
- * workqueue per bio_set. If we go to allocate and there are
- * bios on current->bio_list, we first try the allocation
- * without __GFP_WAIT; if that fails, we punt those bios we
- * would be blocking to the rescuer workqueue before we retry
- * with the original gfp_flags.
- */
-
- if (current->bio_list && !bio_list_empty(current->bio_list))
- gfp_mask &= ~__GFP_WAIT;
-
p = mempool_alloc(bs->bio_pool, gfp_mask);
- if (!p && gfp_mask != saved_gfp) {
- punt_bios_to_rescuer(bs);
- gfp_mask = saved_gfp;
- p = mempool_alloc(bs->bio_pool, gfp_mask);
- }
front_pad = bs->front_pad;
inline_vecs = BIO_INLINE_VECS;
@@ -471,11 +440,6 @@ struct bio *bio_alloc_bioset(gfp_t gfp_m
if (nr_iovecs > inline_vecs) {
bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
- if (!bvl && gfp_mask != saved_gfp) {
- punt_bios_to_rescuer(bs);
- gfp_mask = saved_gfp;
- bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
- }
if (unlikely(!bvl))
goto err_free;
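
The retry dance deleted above guarded against the scenario sketched below: a
hypothetical splitting driver (example_make_request, example_bs and
EXAMPLE_MAX_SECTORS are invented names for illustration) repeatedly allocates
from one bio_set while its earlier clones sit unsubmitted on
current->bio_list. Exhaust the mempool reserve and the task sleeps in
mempool_alloc() waiting on bios that only it can submit; that is the deadlock
the rescuer breaks, and after this patch the punt happens in
sched_submit_work() at the moment the task actually blocks:

    static void example_make_request(struct request_queue *q, struct bio *bio)
    {
            while (bio_sectors(bio) > EXAMPLE_MAX_SECTORS) {
                    /* Allocate a clone from the driver's own bio_set. */
                    struct bio *split = bio_clone_bioset(bio, GFP_NOIO,
                                                         example_bs);

                    /* ... trim split and advance bio ... */
                    generic_make_request(split);    /* parked, not submitted */
            }
            generic_make_request(bio);
    }
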
===================================================================
@@ -2734,6 +2734,13 @@ static inline void sched_submit_work(str
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
/*
+ * If there are bios queued on current->bio_list, flush them to
+ * their rescue workqueues before we go to sleep.
+ */
+ if (unlikely(current->bio_list != NULL) &&
+ !bio_list_empty(current->bio_list))
+ blk_flush_bio_list();
+ /*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
*/
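
With this hunk applied, the submit-work path on entry to schedule() reads
roughly as follows (a sketch assembled from the hunk plus its surrounding
context; the plug flush at the bottom is the pre-existing logic):

    static inline void sched_submit_work(struct task_struct *tsk)
    {
            if (!tsk->state || tsk_is_pi_blocked(tsk))
                    return;

            /*
             * About to block: punt any bios parked on current->bio_list
             * to their rescue workqueues so forward progress does not
             * depend on this task running again.
             */
            if (unlikely(current->bio_list != NULL) &&
                !bio_list_empty(current->bio_list))
                    blk_flush_bio_list();

            /*
             * If we are going to sleep and we have plugged IO queued,
             * make sure to submit it to avoid deadlocks.
             */
            if (blk_needs_flush_plug(tsk))
                    blk_schedule_flush_plug(tsk);
    }
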
===================================================================
@@ -1103,6 +1103,8 @@ static inline bool blk_needs_flush_plug(
!list_empty(&plug->cb_list));
}
+extern void blk_flush_bio_list(void);
+
/*
* tag stuff
*/
@@ -1634,12 +1636,15 @@ static inline void blk_schedule_flush_pl
{
}
-
static inline bool blk_needs_flush_plug(struct task_struct *tsk)
{
return false;
}
+static inline void blk_flush_bio_list(void)
+{
+}
+
#endif /* CONFIG_BLOCK */
#endif
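
A note on the stub above: pairing the extern declaration under CONFIG_BLOCK
with an empty static inline mirrors how blk_needs_flush_plug() is already
handled in this header, and keeps sched_submit_work() free of conditional
compilation. The pattern in miniature, with invented names (CONFIG_FOO,
foo_flush):

    #ifdef CONFIG_FOO
    extern void foo_flush(void);        /* real implementation in foo.c */
    #else
    static inline void foo_flush(void)
    {
            /* compiles away entirely */
    }
    #endif

    static void caller(void)
    {
            foo_flush();                /* no #ifdef at the call site */
    }
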