@@ -568,6 +568,46 @@ cifs_do_mount(struct file_system_type *fs_type,
	return dget(sb->s_root);
}
+static ssize_t cifs_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
+				  unsigned long nr_segs, loff_t pos)
+{
+	struct inode *inode;
+	struct cifs_sb_info *cifs_sb;
+	ssize_t read = 0, retval;
+	unsigned long i;
+
+	inode = iocb->ki_filp->f_path.dentry->d_inode;
+	cifs_sb = CIFS_SB(iocb->ki_filp->f_path.dentry->d_sb);
+
+	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) == 0 ||
+	    CIFS_I(inode)->clientCanCacheRead)
+		return generic_file_aio_read(iocb, iov, nr_segs, pos);
+
+	/*
+	 * In strict cache mode we have to read from the server every time
+	 * unless we hold a level II oplock, because the server can delay
+	 * mtime changes and so we cannot decide whether to invalidate the
+	 * inode. Reading through the page cache can also fail if there are
+	 * mandatory locks on pages affected by this read but not on the
+	 * region from pos to pos+len-1.
+	 */
+
+	for (i = 0; i < nr_segs; i++) {
+		retval = cifs_user_read(iocb->ki_filp, iov[i].iov_base,
+					iov[i].iov_len, &pos);
+		if (retval < 0) {
+			read = read ? read : retval;
+			break;
+		}
+
+		read += retval;
+	}
+
+	iocb->ki_pos = pos;
+
+	return read;
+}
+
static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
@@ -690,7 +730,7 @@ const struct inode_operations cifs_symlink_inode_ops = {
const struct file_operations cifs_file_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
-	.aio_read = generic_file_aio_read,
+	.aio_read = cifs_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,
@@ -727,7 +767,7 @@ const struct file_operations cifs_file_direct_ops = {
const struct file_operations cifs_file_nobrl_ops = {
	.read = do_sync_read,
	.write = do_sync_write,
-	.aio_read = generic_file_aio_read,
+	.aio_read = cifs_file_aio_read,
	.aio_write = cifs_file_aio_write,
	.open = cifs_open,
	.release = cifs_close,