@@ -60,12 +60,12 @@ int iomap_dio_iopoll(struct kiocb *kiocb, bool spin)
EXPORT_SYMBOL_GPL(iomap_dio_iopoll);
static void iomap_dio_submit_bio(struct iomap_dio *dio, struct iomap *iomap,
- struct bio *bio, loff_t pos)
+ struct bio *bio, loff_t pos, bool split)
{
atomic_inc(&dio->ref);
if (dio->iocb->ki_flags & IOCB_HIPRI)
- bio_set_polled(bio, dio->iocb);
+ bio_set_polled(bio, dio->iocb, split);
dio->submit.last_queue = bdev_get_queue(iomap->bdev);
if (dio->dops && dio->dops->submit_io)
@@ -214,6 +214,7 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
int nr_pages, ret = 0;
size_t copied = 0;
size_t orig_count;
+ bool split = false;
if ((pos | length | align) & ((1 << blkbits) - 1))
return -EINVAL;
@@ -309,7 +310,17 @@ iomap_dio_bio_actor(struct inode *inode, loff_t pos, loff_t length,
copied += n;
nr_pages = iov_iter_npages(dio->submit.iter,
BIO_MAX_PAGES);
- iomap_dio_submit_bio(dio, iomap, bio, pos);
+ /*
+ * The current dio needs to be split into multiple bios here.
+ * iopoll for split bio will cause subtle trouble such as
+ * hang when doing sync polling, while iopoll is initially
+ * for small size, latency sensitive IO. Thus disable iopoll
+ * if split needed.
+ */
+ if (nr_pages)
+ split = true;
+
+ iomap_dio_submit_bio(dio, iomap, bio, pos, split);
pos += n;
} while (nr_pages);
@@ -806,9 +806,11 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
* must be found by the caller. This is different than IRQ driven IO, where
* it's safe to wait for IO to complete.
*/
-static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
+static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb, bool split)
{
- bio->bi_opf |= REQ_HIPRI;
+ if (!split)
+ bio->bi_opf |= REQ_HIPRI;
+