@@ -203,7 +203,7 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
/*
* success
*/
- if ((iov_iter_rw(iter) == WRITE &&
+ if ((op_is_write(rq->cmd_flags) &&
(!map_data || !map_data->null_mapped)) ||
(map_data && map_data->from_user)) {
ret = bio_copy_from_iter(bio, iter);
@@ -73,7 +73,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
return -ENOMEM;
}
- if (iov_iter_rw(iter) == READ) {
+ if (iocb_is_read(iocb)) {
bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
if (user_backed_iter(iter))
should_dirty = true;
@@ -88,7 +88,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
goto out;
ret = bio.bi_iter.bi_size;
- if (iov_iter_rw(iter) == WRITE)
+ if (iocb_is_write(iocb))
task_io_account_write(ret);
if (iocb->ki_flags & IOCB_NOWAIT)
@@ -174,7 +174,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
struct blk_plug plug;
struct blkdev_dio *dio;
struct bio *bio;
- bool is_read = (iov_iter_rw(iter) == READ), is_sync;
+ bool is_read = iocb_is_read(iocb), is_sync;
blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
loff_t pos = iocb->ki_pos;
int ret = 0;
@@ -296,7 +296,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
unsigned int nr_pages)
{
struct block_device *bdev = iocb->ki_filp->private_data;
- bool is_read = iov_iter_rw(iter) == READ;
+ bool is_read = iocb_is_read(iocb);
blk_opf_t opf = is_read ? REQ_OP_READ : dio_bio_write_op(iocb);
struct blkdev_dio *dio;
struct bio *bio;
@@ -254,7 +254,7 @@ v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ssize_t n;
int err = 0;
- if (iov_iter_rw(iter) == WRITE) {
+ if (iocb_is_write(iocb)) {
n = p9_client_write(file->private_data, pos, iter, &err);
if (n) {
struct inode *inode = file_inode(file);
@@ -400,7 +400,7 @@ affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
loff_t offset = iocb->ki_pos;
ssize_t ret;
- if (iov_iter_rw(iter) == WRITE) {
+ if (iocb_is_write(iocb)) {
loff_t size = offset + count;
if (AFFS_I(inode)->mmu_private < size)
@@ -408,7 +408,7 @@ affs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
}
ret = blockdev_direct_IO(iocb, inode, iter, affs_get_block);
- if (ret < 0 && iov_iter_rw(iter) == WRITE)
+ if (ret < 0 && iocb_is_write(iocb))
affs_write_failed(mapping, offset + count);
return ret;
}
@@ -1284,7 +1284,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
struct timespec64 mtime = current_time(inode);
size_t count = iov_iter_count(iter);
loff_t pos = iocb->ki_pos;
- bool write = iov_iter_rw(iter) == WRITE;
+ bool write = iocb_is_write(iocb);
bool should_dirty = !write && user_backed_iter(iter);
if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
@@ -1938,14 +1938,6 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
unsigned int to_read, page_offset;
int rc;
- if (iov_iter_rw(&msg->msg_iter) == WRITE) {
- /* It's a bug in upper layer to get there */
- cifs_dbg(VFS, "Invalid msg iter dir %u\n",
- iov_iter_rw(&msg->msg_iter));
- rc = -EINVAL;
- goto out;
- }
-
switch (iov_iter_type(&msg->msg_iter)) {
case ITER_KVEC:
buf = msg->msg_iter.kvec->iov_base;
@@ -1967,7 +1959,6 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
rc = -EINVAL;
}
-out:
/* SMBDirect will read it all or nothing */
if (rc > 0)
msg->msg_iter.count = 0;
@@ -1405,7 +1405,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
loff_t pos = iomi->pos;
struct dax_device *dax_dev = iomap->dax_dev;
loff_t end = pos + length, done = 0;
- bool write = iov_iter_rw(iter) == WRITE;
+ bool write = iomi->flags & IOMAP_WRITE;
bool cow = write && iomap->flags & IOMAP_F_SHARED;
ssize_t ret = 0;
size_t xfer;
@@ -1455,7 +1455,7 @@ static loff_t dax_iomap_iter(const struct iomap_iter *iomi,
map_len = dax_direct_access(dax_dev, pgoff, PHYS_PFN(size),
DAX_ACCESS, &kaddr, NULL);
- if (map_len == -EIO && iov_iter_rw(iter) == WRITE) {
+ if (map_len == -EIO && write) {
map_len = dax_direct_access(dax_dev, pgoff,
PHYS_PFN(size), DAX_RECOVERY_WRITE,
&kaddr, NULL);
@@ -1530,7 +1530,7 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
if (!iomi.len)
return 0;
- if (iov_iter_rw(iter) == WRITE) {
+ if (iocb_is_write(iocb)) {
lockdep_assert_held_write(&iomi.inode->i_rwsem);
iomi.flags |= IOMAP_WRITE;
} else {
@@ -1143,7 +1143,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
*/
/* watch out for a 0 len io from a tricksy fs */
- if (iov_iter_rw(iter) == READ && !count)
+ if (iocb_is_read(iocb) && !count)
return 0;
dio = kmem_cache_alloc(dio_cache, GFP_KERNEL);
@@ -1157,14 +1157,14 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
memset(dio, 0, offsetof(struct dio, pages));
dio->flags = flags;
- if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
+ if (dio->flags & DIO_LOCKING && iocb_is_read(iocb)) {
/* will be released by direct_io_worker */
inode_lock(inode);
}
/* Once we sampled i_size check for reads beyond EOF */
dio->i_size = i_size_read(inode);
- if (iov_iter_rw(iter) == READ && offset >= dio->i_size) {
+ if (iocb_is_read(iocb) && offset >= dio->i_size) {
retval = 0;
goto fail_dio;
}
@@ -1177,7 +1177,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
goto fail_dio;
}
- if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ) {
+ if (dio->flags & DIO_LOCKING && iocb_is_read(iocb)) {
struct address_space *mapping = iocb->ki_filp->f_mapping;
retval = filemap_write_and_wait_range(mapping, offset, end - 1);
@@ -1193,13 +1193,13 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
*/
if (is_sync_kiocb(iocb))
dio->is_async = false;
- else if (iov_iter_rw(iter) == WRITE && end > i_size_read(inode))
+ else if (iocb_is_write(iocb) && end > i_size_read(inode))
dio->is_async = false;
else
dio->is_async = true;
dio->inode = inode;
- if (iov_iter_rw(iter) == WRITE) {
+ if (iocb_is_write(iocb)) {
dio->opf = REQ_OP_WRITE | REQ_SYNC | REQ_IDLE;
if (iocb->ki_flags & IOCB_NOWAIT)
dio->opf |= REQ_NOWAIT;
@@ -1211,7 +1211,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
* For AIO O_(D)SYNC writes we need to defer completions to a workqueue
* so that we can call ->fsync.
*/
- if (dio->is_async && iov_iter_rw(iter) == WRITE) {
+ if (dio->is_async && iocb_is_write(iocb)) {
retval = 0;
if (iocb_is_dsync(iocb))
retval = dio_set_defer_completion(dio);
@@ -1248,7 +1248,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
spin_lock_init(&dio->bio_lock);
dio->refcount = 1;
- dio->should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ;
+ dio->should_dirty = user_backed_iter(iter) && iocb_is_read(iocb);
sdio.iter = iter;
sdio.final_block_in_request = end >> blkbits;
@@ -1305,7 +1305,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
* we can let i_mutex go now that its achieved its purpose
* of protecting us from looking up uninitialized blocks.
*/
- if (iov_iter_rw(iter) == READ && (dio->flags & DIO_LOCKING))
+ if (iocb_is_read(iocb) && (dio->flags & DIO_LOCKING))
inode_unlock(dio->inode);
/*
@@ -1317,7 +1317,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
*/
BUG_ON(retval == -EIOCBQUEUED);
if (dio->is_async && retval == 0 && dio->result &&
- (iov_iter_rw(iter) == READ || dio->result == count))
+ (iocb_is_read(iocb) || dio->result == count))
retval = -EIOCBQUEUED;
else
dio_await_completion(dio);
@@ -1330,7 +1330,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
return retval;
fail_dio:
- if (dio->flags & DIO_LOCKING && iov_iter_rw(iter) == READ)
+ if (dio->flags & DIO_LOCKING && iocb_is_read(iocb))
inode_unlock(inode);
kmem_cache_free(dio_cache, dio);
@@ -412,10 +412,10 @@ static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
struct address_space *mapping = iocb->ki_filp->f_mapping;
struct inode *inode = mapping->host;
loff_t size = iocb->ki_pos + iov_iter_count(iter);
- int rw = iov_iter_rw(iter);
+ bool writing = iocb_is_write(iocb);
ssize_t ret;
- if (rw == WRITE) {
+ if (writing) {
/*
* FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
* so we need to update the ->i_size_aligned to block boundary.
@@ -434,7 +434,7 @@ static ssize_t exfat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
* condition of exfat_get_block() and ->truncate().
*/
ret = blockdev_direct_IO(iocb, inode, iter, exfat_get_block);
- if (ret < 0 && (rw & WRITE))
+ if (ret < 0 && writing)
exfat_write_failed(mapping, size);
return ret;
}
@@ -919,7 +919,7 @@ ext2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ssize_t ret;
ret = blockdev_direct_IO(iocb, inode, iter, ext2_get_block);
- if (ret < 0 && iov_iter_rw(iter) == WRITE)
+ if (ret < 0 && iocb_is_write(iocb))
ext2_write_failed(mapping, offset + count);
return ret;
}
@@ -809,7 +809,7 @@ int f2fs_truncate(struct inode *inode)
return 0;
}
-static bool f2fs_force_buffered_io(struct inode *inode, int rw)
+static bool f2fs_force_buffered_io(struct inode *inode, bool writing)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -827,9 +827,9 @@ static bool f2fs_force_buffered_io(struct inode *inode, int rw)
* for blkzoned device, fallback direct IO to buffered IO, so
* all IOs can be serialized by log-structured write.
*/
- if (f2fs_sb_has_blkzoned(sbi) && (rw == WRITE))
+ if (f2fs_sb_has_blkzoned(sbi) && writing)
return true;
- if (f2fs_lfs_mode(sbi) && rw == WRITE && F2FS_IO_ALIGNED(sbi))
+ if (f2fs_lfs_mode(sbi) && writing && F2FS_IO_ALIGNED(sbi))
return true;
if (is_sbi_flag_set(sbi, SBI_CP_DISABLED))
return true;
@@ -865,7 +865,7 @@ int f2fs_getattr(struct user_namespace *mnt_userns, const struct path *path,
unsigned int bsize = i_blocksize(inode);
stat->result_mask |= STATX_DIOALIGN;
- if (!f2fs_force_buffered_io(inode, WRITE)) {
+ if (!f2fs_force_buffered_io(inode, true)) {
stat->dio_mem_align = bsize;
stat->dio_offset_align = bsize;
}
@@ -4254,7 +4254,7 @@ static bool f2fs_should_use_dio(struct inode *inode, struct kiocb *iocb,
if (!(iocb->ki_flags & IOCB_DIRECT))
return false;
- if (f2fs_force_buffered_io(inode, iov_iter_rw(iter)))
+ if (f2fs_force_buffered_io(inode, iocb_is_write(iocb)))
return false;
/*
@@ -261,7 +261,7 @@ static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
loff_t offset = iocb->ki_pos;
ssize_t ret;
- if (iov_iter_rw(iter) == WRITE) {
+ if (iocb_is_write(iocb)) {
/*
* FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
* so we need to update the ->mmu_private to block boundary.
@@ -281,7 +281,7 @@ static ssize_t fat_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
* condition of fat_get_block() and ->truncate().
*/
ret = blockdev_direct_IO(iocb, inode, iter, fat_get_block);
- if (ret < 0 && iov_iter_rw(iter) == WRITE)
+ if (ret < 0 && iocb_is_write(iocb))
fat_write_failed(mapping, offset + count);
return ret;
@@ -720,7 +720,7 @@ static bool file_extending_write(struct kiocb *iocb, struct iov_iter *from)
{
struct inode *inode = file_inode(iocb->ki_filp);
- return (iov_iter_rw(from) == WRITE &&
+ return (iocb_is_write(iocb) &&
((iocb->ki_pos) >= i_size_read(inode) ||
(iocb->ki_pos + iov_iter_count(from) > i_size_read(inode))));
}
@@ -2897,7 +2897,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
inode = file->f_mapping->host;
i_size = i_size_read(inode);
- if ((iov_iter_rw(iter) == READ) && (offset >= i_size))
+ if (iocb_is_read(iocb) && (offset >= i_size))
return 0;
io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL);
@@ -2909,7 +2909,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
io->bytes = -1;
io->size = 0;
io->offset = offset;
- io->write = (iov_iter_rw(iter) == WRITE);
+ io->write = iocb_is_write(iocb);
io->err = 0;
/*
* By default, we want to optimize all I/Os with async request
@@ -2942,7 +2942,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
io->done = &wait;
}
- if (iov_iter_rw(iter) == WRITE) {
+ if (iocb_is_write(iocb)) {
ret = fuse_direct_io(io, iter, &pos, FUSE_DIO_WRITE);
fuse_invalidate_attr_mask(inode, FUSE_STATX_MODSIZE);
} else {
@@ -2965,7 +2965,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
kref_put(&io->refcnt, fuse_io_release);
- if (iov_iter_rw(iter) == WRITE) {
+ if (iocb_is_write(iocb)) {
fuse_write_update_attr(inode, pos, ret);
/* For extending writes we already hold exclusive lock */
if (ret < 0 && offset + count > i_size)
@@ -141,7 +141,7 @@ static ssize_t hfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
- if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
+ if (unlikely(iocb_is_write(iocb) && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = iocb->ki_pos + count;
@@ -138,7 +138,7 @@ static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
- if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
+ if (unlikely(iocb_is_write(iocb) && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = iocb->ki_pos + count;
@@ -519,7 +519,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
dio->submit.waiter = current;
dio->submit.poll_bio = NULL;
- if (iov_iter_rw(iter) == READ) {
+ if (iocb_is_read(iocb)) {
if (iomi.pos >= dio->i_size)
goto out_free_dio;
@@ -573,7 +573,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
if (ret)
goto out_free_dio;
- if (iov_iter_rw(iter) == WRITE) {
+ if (iomi.flags & IOMAP_WRITE) {
/*
* Try to invalidate cache pages for the range we are writing.
* If this invalidation fails, let the caller fall back to
@@ -613,7 +613,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
* Revert iter to a state corresponding to that as some callers (such
* as the splice code) rely on it.
*/
- if (iov_iter_rw(iter) == READ && iomi.pos >= dio->i_size)
+ if (!(iomi.flags & IOMAP_WRITE) && iomi.pos >= dio->i_size)
iov_iter_revert(iter, iomi.pos - dio->i_size);
if (ret == -EFAULT && dio->size && (dio_flags & IOMAP_DIO_PARTIAL)) {
@@ -334,7 +334,7 @@ static ssize_t jfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
- if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
+ if (unlikely(iocb_is_write(iocb) && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = iocb->ki_pos + count;
@@ -133,7 +133,7 @@ int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);
- if (iov_iter_rw(iter) == READ)
+ if (iocb_is_read(iocb))
ret = nfs_file_direct_read(iocb, iter, true);
else
ret = nfs_file_direct_write(iocb, iter, true);
@@ -289,7 +289,7 @@ nilfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
struct inode *inode = file_inode(iocb->ki_filp);
- if (iov_iter_rw(iter) == WRITE)
+ if (iocb_is_write(iocb))
return 0;
/* Needs synchronization with the cleaner */
@@ -761,7 +761,7 @@ static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
struct ntfs_inode *ni = ntfs_i(inode);
loff_t vbo = iocb->ki_pos;
loff_t end;
- int wr = iov_iter_rw(iter) & WRITE;
+ bool wr = iocb_is_write(iocb);
size_t iter_count = iov_iter_count(iter);
loff_t valid;
ssize_t ret;
@@ -2441,7 +2441,7 @@ static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
!ocfs2_supports_append_dio(osb))
return 0;
- if (iov_iter_rw(iter) == READ)
+ if (iocb_is_read(iocb))
get_block = ocfs2_lock_get_block;
else
get_block = ocfs2_dio_wr_get_block;
@@ -521,7 +521,7 @@ static ssize_t orangefs_direct_IO(struct kiocb *iocb,
*/
struct file *file = iocb->ki_filp;
loff_t pos = iocb->ki_pos;
- enum ORANGEFS_io_type type = iov_iter_rw(iter) == WRITE ?
+ enum ORANGEFS_io_type type = iocb_is_write(iocb) ?
ORANGEFS_IO_WRITE : ORANGEFS_IO_READ;
loff_t *offset = &pos;
struct inode *inode = file->f_mapping->host;
@@ -3249,7 +3249,7 @@ static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
* In case of error extending write may have instantiated a few
* blocks outside i_size. Trim these off again.
*/
- if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
+ if (unlikely(iocb_is_write(iocb) && ret < 0)) {
loff_t isize = i_size_read(inode);
loff_t end = iocb->ki_pos + count;
@@ -219,7 +219,7 @@ static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
ssize_t ret;
ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block);
- if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
+ if (unlikely(ret < 0 && iocb_is_write(iocb)))
udf_write_failed(mapping, iocb->ki_pos + count);
return ret;
}
@@ -353,6 +353,16 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb)
return kiocb->ki_complete == NULL;
}
+static inline bool iocb_is_write(const struct kiocb *kiocb)
+{
+ return kiocb->ki_flags & IOCB_WRITE;
+}
+
+static inline bool iocb_is_read(const struct kiocb *kiocb)
+{
+ return !iocb_is_write(kiocb);
+}
+
struct address_space_operations {
int (*writepage)(struct page *page, struct writeback_control *wbc);
int (*read_folio)(struct file *, struct folio *);
@@ -114,11 +114,6 @@ static inline bool iov_iter_is_xarray(const struct iov_iter *i)
return iov_iter_type(i) == ITER_XARRAY;
}
-static inline unsigned char iov_iter_rw(const struct iov_iter *i)
-{
- return i->data_source ? WRITE : READ;
-}
-
static inline bool user_backed_iter(const struct iov_iter *i)
{
return i->user_backed;
@@ -1429,6 +1429,11 @@ static struct page *first_bvec_segment(const struct iov_iter *i,
return page;
}
+static unsigned char iov_iter_rw(const struct iov_iter *i)
+{
+ return i->data_source ? WRITE : READ;
+}
+
static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i,
struct page ***pages, size_t maxsize,
unsigned int maxpages, size_t *start,
Use information other than the iterator direction to determine the
direction of the I/O:

 (*) If a kiocb is available, use the IOCB_WRITE flag.

 (*) If an iomap_iter is available, use the IOMAP_WRITE flag.

 (*) If a request is available, use op_is_write().

Drop the check on the iterator in smbd_recv() and its warning.

This leaves __iov_iter_get_pages_alloc() the only user of iov_iter_rw(),
so move it there and uninline it.

Changes:
========
ver #6)
 - Move to the front of the patchset.
 - Added iocb_is_read() and iocb_is_write() to check IOCB_WRITE.
 - Use op_is_write() in bio_copy_user_iov().
 - Drop the checks from smbd_recv().

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Al Viro <viro@zeniv.linux.org.uk>
Link: https://lore.kernel.org/r/167305163159.1521586.9460968250704377087.stgit@warthog.procyon.org.uk/ # v4
Link: https://lore.kernel.org/r/167344727810.2425628.4715663653893036683.stgit@warthog.procyon.org.uk/ # v5
---
 block/blk-map.c      |  2 +-
 block/fops.c         |  8 ++++----
 fs/9p/vfs_addr.c     |  2 +-
 fs/affs/file.c       |  4 ++--
 fs/ceph/file.c       |  2 +-
 fs/cifs/smbdirect.c  |  9 ---------
 fs/dax.c             |  6 +++---
 fs/direct-io.c       | 22 +++++++++++-----------
 fs/exfat/inode.c     |  6 +++---
 fs/ext2/inode.c      |  2 +-
 fs/f2fs/file.c       | 10 +++++-----
 fs/fat/inode.c       |  4 ++--
 fs/fuse/dax.c        |  2 +-
 fs/fuse/file.c       |  8 ++++----
 fs/hfs/inode.c       |  2 +-
 fs/hfsplus/inode.c   |  2 +-
 fs/iomap/direct-io.c |  6 +++---
 fs/jfs/inode.c       |  2 +-
 fs/nfs/direct.c      |  2 +-
 fs/nilfs2/inode.c    |  2 +-
 fs/ntfs3/inode.c     |  2 +-
 fs/ocfs2/aops.c      |  2 +-
 fs/orangefs/inode.c  |  2 +-
 fs/reiserfs/inode.c  |  2 +-
 fs/udf/inode.c       |  2 +-
 include/linux/fs.h   | 10 ++++++++++
 include/linux/uio.h  |  5 -----
 lib/iov_iter.c       |  5 +++++
 28 files changed, 67 insertions(+), 66 deletions(-)
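
For reference, a rough standalone sketch of the three replacement checks
described above. It is illustrative only and not part of the patch: the
example_* helper names are made up, but IOCB_WRITE, IOMAP_WRITE and
op_is_write() are the actual flags/helpers the conversions switch to.

	#include <linux/fs.h>		/* struct kiocb, IOCB_WRITE */
	#include <linux/iomap.h>	/* struct iomap_iter, IOMAP_WRITE */
	#include <linux/blk-mq.h>	/* struct request, op_is_write() */

	/* 1. Paths that have a kiocb check IOCB_WRITE in ki_flags
	 *    (what the new iocb_is_write()/iocb_is_read() helpers do). */
	static bool example_kiocb_is_write(const struct kiocb *iocb)
	{
		return iocb->ki_flags & IOCB_WRITE;
	}

	/* 2. iomap-based paths check the IOMAP_WRITE flag on the
	 *    iomap_iter (as in dax_iomap_iter() and __iomap_dio_rw()). */
	static bool example_iomap_is_write(const struct iomap_iter *iomi)
	{
		return iomi->flags & IOMAP_WRITE;
	}

	/* 3. Block-layer paths that have a request use op_is_write()
	 *    on the request op flags (as in bio_copy_user_iov()). */
	static bool example_request_is_write(const struct request *rq)
	{
		return op_is_write(rq->cmd_flags);
	}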