@@ -1270,15 +1270,13 @@ static void ll_io_init(struct cl_io *io, const struct file *file, int write)
* doesn't make the situation worse on single node but it may interleave write
* results from multiple nodes due to short read handling in ll_file_aio_read().
*
- * @env: lu_env
* @iocb: kiocb from kernel
* @iter: user space buffers where the data will be copied
*
* Returns: number of bytes have been read, or error code if error occurred.
*/
static ssize_t
-ll_do_fast_read(const struct lu_env *env, struct kiocb *iocb,
- struct iov_iter *iter)
+ll_do_fast_read(struct kiocb *iocb, struct iov_iter *iter)
{
ssize_t result;
@@ -1292,9 +1290,7 @@ static void ll_io_init(struct cl_io *io, const struct file *file, int write)
if (iocb->ki_filp->f_flags & O_DIRECT)
return 0;
- ll_cl_add(iocb->ki_filp, env, NULL, LCC_RW);
result = generic_file_read_iter(iocb, iter);
- ll_cl_remove(iocb->ki_filp, env);
/*
* If the first page is not in cache, generic_file_aio_read() will be
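
Taken together, the two hunks above strip the lu_env plumbing out of the fast-read helper: ll_do_fast_read() loses its env parameter and no longer brackets generic_file_read_iter() with ll_cl_add()/ll_cl_remove(). That means ll_readpage(), which runs underneath generic_file_read_iter(), can now be entered with no ll_cl_context registered for the thread; the ll_readpage() hunks further down handle exactly that case. As a rough, self-contained userspace model of the before/after (hypothetical names, not the Lustre API):

#include <stdio.h>

struct ctx { int id; };

/* Per-thread registration, standing in for ll_cl_add()/ll_cl_find();
 * __thread is the GCC/Clang thread-local qualifier.
 */
static __thread struct ctx *current_ctx;

static void readpage_cb(void)
{
	if (current_ctx)
		printf("readpage: using caller's context %d\n", current_ctx->id);
	else
		printf("readpage: no context registered, cope locally\n");
}

int main(void)
{
	struct ctx c = { .id = 1 };

	/* old scheme: register a per-thread context around the call */
	current_ctx = &c;
	readpage_cb();
	current_ctx = NULL;

	/* new scheme: call without registering; the callback copes */
	readpage_cb();
	return 0;
}

The old scheme pins the caller's environment for the whole page-cache call; the new one makes the ->readpage() side self-sufficient, which is what lets the fast path avoid taking an env at all.
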
@@ -1319,14 +1315,14 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
u16 refcheck;
ssize_t rc2;
+ result = ll_do_fast_read(iocb, to);
+ if (result < 0 || iov_iter_count(to) == 0)
+ goto out;
+
env = cl_env_get(&refcheck);
if (IS_ERR(env))
return PTR_ERR(env);
- result = ll_do_fast_read(env, iocb, to);
- if (result < 0 || iov_iter_count(to) == 0)
- goto out;
-
args = ll_env_args(env);
args->u.normal.via_iter = to;
args->u.normal.via_iocb = iocb;
@@ -1338,8 +1334,8 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
else if (result == 0)
result = rc2;
-out:
cl_env_put(env, &refcheck);
+out:
return result;
}
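
The two hunks above restructure ll_file_read_iter() so the fast read runs before cl_env_get() rather than after it, and the out: label moves below cl_env_put() so the early exit never releases an env it never acquired. (One behaviour visible in the hunks: if cl_env_get() fails after a partially successful fast read, the partial count is discarded in favour of PTR_ERR(env).) A compressed, standalone sketch of that control flow, with stand-in names that are not the Lustre calls:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for cl_env_get()/cl_env_put(). */
static void *env_get(void)    { return malloc(1); }
static void  env_put(void *e) { free(e); }

/* Pretend the page cache can satisfy half of any request. */
static long fast_path(long want)
{
	return want / 2;
}

static long read_iter(long want)
{
	void *env;
	long result;

	/* Cheap cached path first: it needs no env, so run it before
	 * acquiring one, and bail out early if it was enough.
	 */
	result = fast_path(want);
	if (result < 0 || result == want)
		goto out;

	env = env_get();
	if (!env)
		return -ENOMEM;

	result += want - result;	/* pretend the slow path reads the rest */

	env_put(env);
out:	/* the label sits after env_put(): the early exit skips it */
	return result;
}

int main(void)
{
	printf("read %ld bytes\n", read_iter(100));
	return 0;
}

The remaining hunks are against ll_readpage(), which after this change has to work both when an ll_cl_context exists and when it does not.
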
@@ -1158,22 +1158,21 @@ int ll_readpage(struct file *file, struct page *vmpage)
{
struct cl_object *clob = ll_i2info(file_inode(file))->lli_clob;
struct ll_cl_context *lcc;
- const struct lu_env *env;
- struct cl_io *io;
+ const struct lu_env *env = NULL;
+ struct cl_io *io = NULL;
struct cl_page *page;
int result;
lcc = ll_cl_find(file);
- if (!lcc) {
- unlock_page(vmpage);
- return -EIO;
+ if (lcc) {
+ env = lcc->lcc_env;
+ io = lcc->lcc_io;
}
- env = lcc->lcc_env;
- io = lcc->lcc_io;
if (!io) { /* fast read */
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
struct ll_readahead_state *ras = &fd->fd_ras;
+ struct lu_env *local_env = NULL;
struct inode *inode = file_inode(file);
struct vvp_page *vpg;
@@ -1189,11 +1188,16 @@ int ll_readpage(struct file *file, struct page *vmpage)
return result;
}
+ if (!env) {
+ local_env = cl_env_percpu_get();
+ env = local_env;
+ }
+
vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
if (vpg->vpg_defer_uptodate) {
enum ras_update_flags flags = LL_RAS_HIT;
- if (lcc->lcc_type == LCC_MMAP)
+ if (lcc && lcc->lcc_type == LCC_MMAP)
flags |= LL_RAS_MMAP;
/*
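
The ll_readpage() hunks above make the function tolerate being entered without an ll_cl_context: env and io now start out NULL and are only filled in when ll_cl_find() returns something, the fast-read branch borrows a per-CPU env via cl_env_percpu_get() when no env was supplied, and lcc is NULL-checked before lcc_type is dereferenced. A small standalone model of the borrow-if-missing pattern (hypothetical names, not the Lustre API; the matching release appears in the last hunk below):

#include <stdio.h>
#include <stdlib.h>

struct env { int borrowed; };

/* Stand-ins for cl_env_percpu_get()/cl_env_percpu_put(). */
static struct env *percpu_env_get(void)
{
	struct env *e = malloc(sizeof(*e));

	e->borrowed = 1;
	return e;
}

static void percpu_env_put(struct env *e)
{
	free(e);
}

/* caller_env is NULL on the fast-read path and non-NULL when the regular
 * read or mmap path registered a context for this thread.
 */
static void readpage(struct env *caller_env)
{
	struct env *local_env = NULL;
	struct env *env = caller_env;

	if (!env) {			/* nobody set up an env for us */
		local_env = percpu_env_get();
		env = local_env;
	}

	printf("using %s env\n", env->borrowed ? "borrowed" : "caller-provided");

	if (local_env)			/* release only what we acquired */
		percpu_env_put(local_env);
}

int main(void)
{
	struct env caller = { .borrowed = 0 };

	readpage(&caller);	/* regular read: the context supplies the env */
	readpage(NULL);		/* fast read: borrow one */
	return 0;
}
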
@@ -1220,8 +1224,15 @@ int ll_readpage(struct file *file, struct page *vmpage)
}
}
- unlock_page(vmpage);
+ /* release page refcount before unlocking the page to ensure
+ * the object won't be destroyed in the calling path of
+ * cl_page_put(). Please see comment in ll_releasepage().
+ */
cl_page_put(env, page);
+ unlock_page(vmpage);
+ if (local_env)
+ cl_env_percpu_put(local_env);
+
return result;
}
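
The last hunk finishes the fast-read branch: it drops the cl_page reference before unlocking the VM page (with the added comment pointing at ll_releasepage() for the rationale) and then returns the per-CPU env if one was borrowed. A deliberately simplified, standalone model of that teardown order (hypothetical names; it only illustrates the ordering, not the refcount mechanics):

#include <stdio.h>
#include <stdlib.h>

struct res { const char *name; };

static struct res *acquire(const char *name)
{
	struct res *r = malloc(sizeof(*r));

	r->name = name;
	printf("acquired %s\n", name);
	return r;
}

static void release(struct res *r)
{
	printf("released %s\n", r->name);
	free(r);
}

int main(void)
{
	/* acquisition order in the fast-read branch: the VM hands over a
	 * locked page, a cl_page reference is taken, an env may be borrowed
	 */
	struct res *page_lock = acquire("page lock");
	struct res *page_ref  = acquire("page reference");
	struct res *local_env = acquire("borrowed env");	/* NULL if the caller supplied one */

	/* teardown order after this hunk: reference first, lock second,
	 * borrowed env last, deliberately not strict reverse order
	 */
	release(page_ref);		/* cl_page_put(env, page)       */
	release(page_lock);		/* unlock_page(vmpage)          */
	if (local_env)
		release(local_env);	/* cl_env_percpu_put(local_env) */
	return 0;
}
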