
[01/20] block: add ability to flag write back caching on a device

Message ID 20160413161452.GB22494@kernel.dk (mailing list archive)
State New, archived

Commit Message

Jens Axboe April 13, 2016, 4:14 p.m. UTC
On Wed, Apr 13 2016, Jens Axboe wrote:
> On Wed, Apr 13 2016, Ming Lei wrote:
> > > +static ssize_t queue_wc_store(struct request_queue *q, const char *page,
> > > +                             size_t count)
> > > +{
> > > +       int set = -1;
> > > +
> > > +       if (!strncmp(page, "write back", 10))
> > > +               set = 1;
> > > +       else if (!strncmp(page, "write through", 13) ||
> > > +                !strncmp(page, "none", 4))
> > > +               set = 0;
> > > +
> > > +       if (set == -1)
> > > +               return -EINVAL;
> > > +
> > > +       spin_lock_irq(q->queue_lock);
> > > +       if (set)
> > > +               queue_flag_set(QUEUE_FLAG_WC, q);
> > > +       else
> > > +               queue_flag_clear(QUEUE_FLAG_WC, q);
> > > +       spin_unlock_irq(q->queue_lock);
> > 
> > When the write cache (queue)flag is changed, I guess we need to change
> > q->flush_flags via blk_queue_write_cache() too? Otherwise, the change
> > on write cache flag can't affect block flush behaviour.
> > 
> > BTW, looks the flush_flags has been redundant after this patch, can
> > we just kill it in the future?
> 
> How about we fold both those concerns into one? Kill off ->flush_flags,
> and that then fixes the other part as well. Like the below, untested.

Minor screwup in target_core_iblock.c and a missing unsigned -> long
conversion in dm. Also changed the dm part to pass by value, not
reference.
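
For reference, the pass-by-value change in the dm hunk works by encoding the
flag bit in the opaque pointer argument itself rather than passing its address.
A minimal standalone sketch of that idiom (illustrative only, not the kernel
code; names and values here are made up):

#include <stdio.h>

#define EXAMPLE_FLAG	(1UL << 23)	/* stand-in for 1UL << QUEUE_FLAG_WC */

/* The callback recovers the flag from the opaque pointer by value. */
static int device_has_flag(unsigned long dev_flags, void *data)
{
	unsigned long flag = (unsigned long) data;

	return (dev_flags & flag) != 0;
}

int main(void)
{
	unsigned long dev_flags = EXAMPLE_FLAG;

	/* Pass the flag itself, cast to void *, instead of &flag. */
	printf("%d\n", device_has_flag(dev_flags, (void *) EXAMPLE_FLAG));
	return 0;
}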

Comments

Ming Lei April 13, 2016, 6:50 p.m. UTC | #1
On Thu, Apr 14, 2016 at 12:14 AM, Jens Axboe <axboe@fb.com> wrote:
> On Wed, Apr 13 2016, Jens Axboe wrote:
>> On Wed, Apr 13 2016, Ming Lei wrote:
>> > > +static ssize_t queue_wc_store(struct request_queue *q, const char *page,
>> > > +                             size_t count)
>> > > +{
>> > > +       int set = -1;
>> > > +
>> > > +       if (!strncmp(page, "write back", 10))
>> > > +               set = 1;
>> > > +       else if (!strncmp(page, "write through", 13) ||
>> > > +                !strncmp(page, "none", 4))
>> > > +               set = 0;
>> > > +
>> > > +       if (set == -1)
>> > > +               return -EINVAL;
>> > > +
>> > > +       spin_lock_irq(q->queue_lock);
>> > > +       if (set)
>> > > +               queue_flag_set(QUEUE_FLAG_WC, q);
>> > > +       else
>> > > +               queue_flag_clear(QUEUE_FLAG_WC, q);
>> > > +       spin_unlock_irq(q->queue_lock);
>> >
>> > When the write cache (queue)flag is changed, I guess we need to change
>> > q->flush_flags via blk_queue_write_cache() too? Otherwise, the change
>> > on write cache flag can't affect block flush behaviour.
>> >
>> > BTW, looks the flush_flags has been redundant after this patch, can
>> > we just kill it in the future?
>>
>> How about we fold both those concerns into one? Kill off ->flush_flags,
>> and that then fixes the other part as well. Like the below, untested.
>
> Minor screwup in target_core_iblock.c and a missing unsigned -> long
> conversion in dm. Also changed the dm part to pass by value, not
> reference.

The patch looks fine.

I also have a question about the sysfs interface for changing the cache type:
how do we keep the kernel's view consistent with the device's actual cache setting?

For example, someone switches the cache type from write back to write through
via the sysfs interface, but the device itself is still in write back mode; that
could cause trouble, since the kernel will no longer issue flushes.
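
Concretely, with QUEUE_FLAG_WC cleared, the check added to
generic_make_request_checks() (in the hunk quoted below) strips REQ_FLUSH and
REQ_FUA before they reach the driver, so a device that is still caching
internally never sees a flush. A simplified standalone model of that path
(a sketch only, not the kernel code; the flag values are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define REQ_FLUSH	(1U << 0)	/* illustrative stand-ins, not the real values */
#define REQ_FUA		(1U << 1)

static unsigned int filter_rw(unsigned int bi_rw, bool queue_has_wc)
{
	/* Mirrors the new check: no write back cache -> drop flush/FUA bits. */
	if ((bi_rw & (REQ_FLUSH | REQ_FUA)) && !queue_has_wc)
		bi_rw &= ~(REQ_FLUSH | REQ_FUA);
	return bi_rw;
}

int main(void)
{
	/* sysfs says "write through", but the device may still be caching. */
	unsigned int rw = REQ_FLUSH | REQ_FUA;

	printf("driver sees flags: %#x\n", filter_rw(rw, false));	/* prints 0 */
	return 0;
}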

>
> diff --git a/block/blk-core.c b/block/blk-core.c
> index c50227796a26..2475b1c72773 100644
> --- a/block/blk-core.c
> +++ b/block/blk-core.c
> @@ -1964,7 +1964,8 @@ generic_make_request_checks(struct bio *bio)
>          * drivers without flush support don't have to worry
>          * about them.
>          */
> -       if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
> +       if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
> +           !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
>                 bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
>                 if (!nr_sectors) {
>                         err = 0;
> diff --git a/block/blk-flush.c b/block/blk-flush.c
> index 9c423e53324a..b1c91d229e5e 100644
> --- a/block/blk-flush.c
> +++ b/block/blk-flush.c
> @@ -95,17 +95,18 @@ enum {
>  static bool blk_kick_flush(struct request_queue *q,
>                            struct blk_flush_queue *fq);
>
> -static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
> +static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
>  {
>         unsigned int policy = 0;
>
>         if (blk_rq_sectors(rq))
>                 policy |= REQ_FSEQ_DATA;
>
> -       if (fflags & REQ_FLUSH) {
> +       if (fflags & (1UL << QUEUE_FLAG_WC)) {
>                 if (rq->cmd_flags & REQ_FLUSH)
>                         policy |= REQ_FSEQ_PREFLUSH;
> -               if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
> +               if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
> +                   (rq->cmd_flags & REQ_FUA))
>                         policy |= REQ_FSEQ_POSTFLUSH;
>         }
>         return policy;
> @@ -384,7 +385,7 @@ static void mq_flush_data_end_io(struct request *rq, int error)
>  void blk_insert_flush(struct request *rq)
>  {
>         struct request_queue *q = rq->q;
> -       unsigned int fflags = q->flush_flags;   /* may change, cache */
> +       unsigned long fflags = q->queue_flags;  /* may change, cache */
>         unsigned int policy = blk_flush_policy(fflags, rq);
>         struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
>
> @@ -393,7 +394,7 @@ void blk_insert_flush(struct request *rq)
>          * REQ_FLUSH and FUA for the driver.
>          */
>         rq->cmd_flags &= ~REQ_FLUSH;
> -       if (!(fflags & REQ_FUA))
> +       if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
>                 rq->cmd_flags &= ~REQ_FUA;
>
>         /*
> diff --git a/block/blk-settings.c b/block/blk-settings.c
> index 80d9327a214b..f679ae122843 100644
> --- a/block/blk-settings.c
> +++ b/block/blk-settings.c
> @@ -822,7 +822,12 @@ EXPORT_SYMBOL(blk_queue_update_dma_alignment);
>
>  void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
>  {
> -       q->flush_not_queueable = !queueable;
> +       spin_lock_irq(q->queue_lock);
> +       if (queueable)
> +               clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
> +       else
> +               set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
> +       spin_unlock_irq(q->queue_lock);
>  }
>  EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
>
> @@ -837,16 +842,13 @@ EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
>  void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
>  {
>         spin_lock_irq(q->queue_lock);
> -       if (wc) {
> +       if (wc)
>                 queue_flag_set(QUEUE_FLAG_WC, q);
> -               q->flush_flags = REQ_FLUSH;
> -       } else
> +       else
>                 queue_flag_clear(QUEUE_FLAG_WC, q);
> -       if (fua) {
> -               if (wc)
> -                       q->flush_flags |= REQ_FUA;
> +       if (fua)
>                 queue_flag_set(QUEUE_FLAG_FUA, q);
> -       } else
> +       else
>                 queue_flag_clear(QUEUE_FLAG_FUA, q);
>         spin_unlock_irq(q->queue_lock);
>  }
> diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
> index 26aa080e243c..3355f1cdd4e5 100644
> --- a/drivers/block/xen-blkback/xenbus.c
> +++ b/drivers/block/xen-blkback/xenbus.c
> @@ -477,7 +477,7 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
>                 vbd->type |= VDISK_REMOVABLE;
>
>         q = bdev_get_queue(bdev);
> -       if (q && q->flush_flags)
> +       if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
>                 vbd->flush_support = true;
>
>         if (q && blk_queue_secdiscard(q))
> diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
> index 4b1ffc0abe11..626a5ec04466 100644
> --- a/drivers/md/dm-table.c
> +++ b/drivers/md/dm-table.c
> @@ -1348,13 +1348,13 @@ static void dm_table_verify_integrity(struct dm_table *t)
>  static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
>                                 sector_t start, sector_t len, void *data)
>  {
> -       unsigned flush = (*(unsigned *)data);
> +       unsigned long flush = (unsigned long) data;
>         struct request_queue *q = bdev_get_queue(dev->bdev);
>
> -       return q && (q->flush_flags & flush);
> +       return q && (q->queue_flags & flush);
>  }
>
> -static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
> +static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
>  {
>         struct dm_target *ti;
>         unsigned i = 0;
> @@ -1375,7 +1375,7 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
>                         return true;
>
>                 if (ti->type->iterate_devices &&
> -                   ti->type->iterate_devices(ti, device_flush_capable, &flush))
> +                   ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
>                         return true;
>         }
>
> @@ -1518,9 +1518,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
>         else
>                 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
>
> -       if (dm_table_supports_flush(t, REQ_FLUSH)) {
> +       if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
>                 wc = true;
> -               if (dm_table_supports_flush(t, REQ_FUA))
> +               if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
>                         fua = true;
>         }
>         blk_queue_write_cache(q, wc, fua);
> diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
> index 9531f5f05b93..26f14970a858 100644
> --- a/drivers/md/raid5-cache.c
> +++ b/drivers/md/raid5-cache.c
> @@ -1188,6 +1188,7 @@ ioerr:
>
>  int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
>  {
> +       struct request_queue *q = bdev_get_queue(rdev->bdev);
>         struct r5l_log *log;
>
>         if (PAGE_SIZE != 4096)
> @@ -1197,7 +1198,7 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
>                 return -ENOMEM;
>         log->rdev = rdev;
>
> -       log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);
> +       log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
>
>         log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
>                                        sizeof(rdev->mddev->uuid));
> diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
> index 026a758e5778..7c4efb4417b0 100644
> --- a/drivers/target/target_core_iblock.c
> +++ b/drivers/target/target_core_iblock.c
> @@ -687,10 +687,10 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
>                  * Force writethrough using WRITE_FUA if a volatile write cache
>                  * is not enabled, or if initiator set the Force Unit Access bit.
>                  */
> -               if (q->flush_flags & REQ_FUA) {
> +               if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
>                         if (cmd->se_cmd_flags & SCF_FUA)
>                                 rw = WRITE_FUA;
> -                       else if (!(q->flush_flags & REQ_FLUSH))
> +                       else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
>                                 rw = WRITE_FUA;
>                         else
>                                 rw = WRITE;
> @@ -836,7 +836,7 @@ static bool iblock_get_write_cache(struct se_device *dev)
>         struct block_device *bd = ib_dev->ibd_bd;
>         struct request_queue *q = bdev_get_queue(bd);
>
> -       return q->flush_flags & REQ_FLUSH;
> +       return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
>  }
>
>  static const struct target_backend_ops iblock_ops = {
> diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
> index f3f232fa505d..57c085917da6 100644
> --- a/include/linux/blkdev.h
> +++ b/include/linux/blkdev.h
> @@ -433,8 +433,6 @@ struct request_queue {
>         /*
>          * for flush operations
>          */
> -       unsigned int            flush_flags;
> -       unsigned int            flush_not_queueable:1;
>         struct blk_flush_queue  *fq;
>
>         struct list_head        requeue_list;
> @@ -493,6 +491,7 @@ struct request_queue {
>  #define QUEUE_FLAG_POLL               22       /* IO polling enabled if set */
>  #define QUEUE_FLAG_WC         23       /* Write back caching */
>  #define QUEUE_FLAG_FUA        24       /* device supports FUA writes */
> +#define QUEUE_FLAG_FLUSH_NQ    25      /* flush not queueable */
>
>  #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
>                                  (1 << QUEUE_FLAG_STACKABLE)    |       \
> @@ -1365,7 +1364,7 @@ static inline unsigned int block_size(struct block_device *bdev)
>
>  static inline bool queue_flush_queueable(struct request_queue *q)
>  {
> -       return !q->flush_not_queueable;
> +       return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
>  }
>
>  typedef struct {struct page *v;} Sector;
>
> --
> Jens Axboe
>
Jens Axboe April 13, 2016, 7:33 p.m. UTC | #2
On 04/13/2016 12:50 PM, Ming Lei wrote:
> On Thu, Apr 14, 2016 at 12:14 AM, Jens Axboe <axboe@fb.com> wrote:
>> On Wed, Apr 13 2016, Jens Axboe wrote:
>>> On Wed, Apr 13 2016, Ming Lei wrote:
>>>>> +static ssize_t queue_wc_store(struct request_queue *q, const char *page,
>>>>> +                             size_t count)
>>>>> +{
>>>>> +       int set = -1;
>>>>> +
>>>>> +       if (!strncmp(page, "write back", 10))
>>>>> +               set = 1;
>>>>> +       else if (!strncmp(page, "write through", 13) ||
>>>>> +                !strncmp(page, "none", 4))
>>>>> +               set = 0;
>>>>> +
>>>>> +       if (set == -1)
>>>>> +               return -EINVAL;
>>>>> +
>>>>> +       spin_lock_irq(q->queue_lock);
>>>>> +       if (set)
>>>>> +               queue_flag_set(QUEUE_FLAG_WC, q);
>>>>> +       else
>>>>> +               queue_flag_clear(QUEUE_FLAG_WC, q);
>>>>> +       spin_unlock_irq(q->queue_lock);
>>>>
>>>> When the write cache (queue)flag is changed, I guess we need to change
>>>> q->flush_flags via blk_queue_write_cache() too? Otherwise, the change
>>>> on write cache flag can't affect block flush behaviour.
>>>>
>>>> BTW, looks the flush_flags has been redundant after this patch, can
>>>> we just kill it in the future?
>>>
>>> How about we fold both those concerns into one? Kill off ->flush_flags,
>>> and that then fixes the other part as well. Like the below, untested.
>>
>> Minor screwup in target_core_iblock.c and a missing unsigned -> long
>> conversion in dm. Also changed the dm part to pass by value, not
>> reference.
>
> The patch looks fine.

Sounds good, I'll queue it up.

> I also have a question about the sysfs interface for changing the cache
> type: how do we keep the kernel's view consistent with the device's actual
> cache setting?
>
> For example, someone switches the cache type from write back to write
> through via the sysfs interface, but the device itself is still in write
> back mode; that could cause trouble, since the kernel will no longer issue
> flushes.

I'm not too worried about that; it's more the fact that we lose the
original driver-signaled state when that happens. It might be a good idea
to have a user-settable flag, but still retain the driver setting.
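
One purely hypothetical way that idea could look, keeping the driver-signaled
state in a separate flag so it is never lost (the flag names and helper below
are made up for illustration and are not part of this patch):

#include <stdbool.h>

#define EX_FLAG_WC		(1UL << 23)	/* effective write back caching */
#define EX_FLAG_WC_DRIVER	(1UL << 26)	/* hypothetical: state signaled by the driver */

/*
 * The driver path records both the capability and the effective setting;
 * a sysfs write would only toggle the effective bit, so the original
 * driver-signaled state is preserved.
 */
static void example_set_write_cache(unsigned long *queue_flags, bool wc,
				    bool from_driver)
{
	if (from_driver) {
		if (wc)
			*queue_flags |= EX_FLAG_WC_DRIVER;
		else
			*queue_flags &= ~EX_FLAG_WC_DRIVER;
	}
	if (wc)
		*queue_flags |= EX_FLAG_WC;
	else
		*queue_flags &= ~EX_FLAG_WC;
}

int main(void)
{
	unsigned long queue_flags = 0;

	example_set_write_cache(&queue_flags, true, true);	/* driver enables WC */
	example_set_write_cache(&queue_flags, false, false);	/* user turns it off */
	/* EX_FLAG_WC is now clear; EX_FLAG_WC_DRIVER still records the capability. */
	return 0;
}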

Patch

diff --git a/block/blk-core.c b/block/blk-core.c
index c50227796a26..2475b1c72773 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1964,7 +1964,8 @@  generic_make_request_checks(struct bio *bio)
 	 * drivers without flush support don't have to worry
 	 * about them.
 	 */
-	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
+	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
 		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
 		if (!nr_sectors) {
 			err = 0;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9c423e53324a..b1c91d229e5e 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -95,17 +95,18 @@  enum {
 static bool blk_kick_flush(struct request_queue *q,
 			   struct blk_flush_queue *fq);
 
-static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq)
+static unsigned int blk_flush_policy(unsigned long fflags, struct request *rq)
 {
 	unsigned int policy = 0;
 
 	if (blk_rq_sectors(rq))
 		policy |= REQ_FSEQ_DATA;
 
-	if (fflags & REQ_FLUSH) {
+	if (fflags & (1UL << QUEUE_FLAG_WC)) {
 		if (rq->cmd_flags & REQ_FLUSH)
 			policy |= REQ_FSEQ_PREFLUSH;
-		if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA))
+		if (!(fflags & (1UL << QUEUE_FLAG_FUA)) &&
+		    (rq->cmd_flags & REQ_FUA))
 			policy |= REQ_FSEQ_POSTFLUSH;
 	}
 	return policy;
@@ -384,7 +385,7 @@  static void mq_flush_data_end_io(struct request *rq, int error)
 void blk_insert_flush(struct request *rq)
 {
 	struct request_queue *q = rq->q;
-	unsigned int fflags = q->flush_flags;	/* may change, cache */
+	unsigned long fflags = q->queue_flags;	/* may change, cache */
 	unsigned int policy = blk_flush_policy(fflags, rq);
 	struct blk_flush_queue *fq = blk_get_flush_queue(q, rq->mq_ctx);
 
@@ -393,7 +394,7 @@  void blk_insert_flush(struct request *rq)
 	 * REQ_FLUSH and FUA for the driver.
 	 */
 	rq->cmd_flags &= ~REQ_FLUSH;
-	if (!(fflags & REQ_FUA))
+	if (!(fflags & (1UL << QUEUE_FLAG_FUA)))
 		rq->cmd_flags &= ~REQ_FUA;
 
 	/*
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 80d9327a214b..f679ae122843 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -822,7 +822,12 @@  EXPORT_SYMBOL(blk_queue_update_dma_alignment);
 
 void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
 {
-	q->flush_not_queueable = !queueable;
+	spin_lock_irq(q->queue_lock);
+	if (queueable)
+		clear_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	else
+		set_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
+	spin_unlock_irq(q->queue_lock);
 }
 EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 
@@ -837,16 +842,13 @@  EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
 void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
 {
 	spin_lock_irq(q->queue_lock);
-	if (wc) {
+	if (wc)
 		queue_flag_set(QUEUE_FLAG_WC, q);
-		q->flush_flags = REQ_FLUSH;
-	} else
+	else
 		queue_flag_clear(QUEUE_FLAG_WC, q);
-	if (fua) {
-		if (wc)
-			q->flush_flags |= REQ_FUA;
+	if (fua)
 		queue_flag_set(QUEUE_FLAG_FUA, q);
-	} else
+	else
 		queue_flag_clear(QUEUE_FLAG_FUA, q);
 	spin_unlock_irq(q->queue_lock);
 }
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 26aa080e243c..3355f1cdd4e5 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -477,7 +477,7 @@  static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
 		vbd->type |= VDISK_REMOVABLE;
 
 	q = bdev_get_queue(bdev);
-	if (q && q->flush_flags)
+	if (q && test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 		vbd->flush_support = true;
 
 	if (q && blk_queue_secdiscard(q))
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4b1ffc0abe11..626a5ec04466 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1348,13 +1348,13 @@  static void dm_table_verify_integrity(struct dm_table *t)
 static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev,
 				sector_t start, sector_t len, void *data)
 {
-	unsigned flush = (*(unsigned *)data);
+	unsigned long flush = (unsigned long) data;
 	struct request_queue *q = bdev_get_queue(dev->bdev);
 
-	return q && (q->flush_flags & flush);
+	return q && (q->queue_flags & flush);
 }
 
-static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
+static bool dm_table_supports_flush(struct dm_table *t, unsigned long flush)
 {
 	struct dm_target *ti;
 	unsigned i = 0;
@@ -1375,7 +1375,7 @@  static bool dm_table_supports_flush(struct dm_table *t, unsigned flush)
 			return true;
 
 		if (ti->type->iterate_devices &&
-		    ti->type->iterate_devices(ti, device_flush_capable, &flush))
+		    ti->type->iterate_devices(ti, device_flush_capable, (void *) flush))
 			return true;
 	}
 
@@ -1518,9 +1518,9 @@  void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 	else
 		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 
-	if (dm_table_supports_flush(t, REQ_FLUSH)) {
+	if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_WC))) {
 		wc = true;
-		if (dm_table_supports_flush(t, REQ_FUA))
+		if (dm_table_supports_flush(t, (1UL << QUEUE_FLAG_FUA)))
 			fua = true;
 	}
 	blk_queue_write_cache(q, wc, fua);
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index 9531f5f05b93..26f14970a858 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1188,6 +1188,7 @@  ioerr:
 
 int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 {
+	struct request_queue *q = bdev_get_queue(rdev->bdev);
 	struct r5l_log *log;
 
 	if (PAGE_SIZE != 4096)
@@ -1197,7 +1198,7 @@  int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
 		return -ENOMEM;
 	log->rdev = rdev;
 
-	log->need_cache_flush = (rdev->bdev->bd_disk->queue->flush_flags != 0);
+	log->need_cache_flush = test_bit(QUEUE_FLAG_WC, &q->queue_flags) != 0;
 
 	log->uuid_checksum = crc32c_le(~0, rdev->mddev->uuid,
 				       sizeof(rdev->mddev->uuid));
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 026a758e5778..7c4efb4417b0 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -687,10 +687,10 @@  iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
 		 * Force writethrough using WRITE_FUA if a volatile write cache
 		 * is not enabled, or if initiator set the Force Unit Access bit.
 		 */
-		if (q->flush_flags & REQ_FUA) {
+		if (test_bit(QUEUE_FLAG_FUA, &q->queue_flags)) {
 			if (cmd->se_cmd_flags & SCF_FUA)
 				rw = WRITE_FUA;
-			else if (!(q->flush_flags & REQ_FLUSH))
+			else if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
 				rw = WRITE_FUA;
 			else
 				rw = WRITE;
@@ -836,7 +836,7 @@  static bool iblock_get_write_cache(struct se_device *dev)
 	struct block_device *bd = ib_dev->ibd_bd;
 	struct request_queue *q = bdev_get_queue(bd);
 
-	return q->flush_flags & REQ_FLUSH;
+	return test_bit(QUEUE_FLAG_WC, &q->queue_flags);
 }
 
 static const struct target_backend_ops iblock_ops = {
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f3f232fa505d..57c085917da6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -433,8 +433,6 @@  struct request_queue {
 	/*
 	 * for flush operations
 	 */
-	unsigned int		flush_flags;
-	unsigned int		flush_not_queueable:1;
 	struct blk_flush_queue	*fq;
 
 	struct list_head	requeue_list;
@@ -493,6 +491,7 @@  struct request_queue {
 #define QUEUE_FLAG_POLL	       22	/* IO polling enabled if set */
 #define QUEUE_FLAG_WC	       23	/* Write back caching */
 #define QUEUE_FLAG_FUA	       24	/* device supports FUA writes */
+#define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueable */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -1365,7 +1364,7 @@  static inline unsigned int block_size(struct block_device *bdev)
 
 static inline bool queue_flush_queueable(struct request_queue *q)
 {
-	return !q->flush_not_queueable;
+	return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
 }
 
 typedef struct {struct page *v;} Sector;