@@ -788,6 +788,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
int ll_writepage(struct page *page, struct writeback_control *wbc);
int ll_writepages(struct address_space *mapping, struct writeback_control *wbc);
int ll_readpage(struct file *file, struct page *page);
+int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, struct file *file);
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
@@ -1094,7 +1094,7 @@ void ll_cl_remove(struct file *file, const struct lu_env *env)
write_unlock(&fd->fd_lock);
}
-static int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
+int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
struct cl_page *page, struct file *file)
{
struct inode *inode = vvp_object_inode(page->cp_obj);
@@ -1153,6 +1153,7 @@ static int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
if (!rc)
task_io_account_read(PAGE_SIZE * count);
}
+
if (anchor && !cl_page_is_owned(page, io)) { /* have sent */
rc = cl_sync_io_wait(env, anchor, 0);
@@ -1174,10 +1175,9 @@ static int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
/* TODO: discard all pages until page reinit route is implemented */
cl_page_list_discard(env, io, &queue->c2_qin);
- /*
- * Unlock unsent pages in case of error.
- */
+ /* Unlock unsent read pages in case of error. */
cl_page_list_disown(env, io, &queue->c2_qin);
+
cl_2queue_fini(env, queue);
return rc;
@@ -1270,6 +1270,7 @@ int ll_readpage(struct file *file, struct page *vmpage)
LASSERT(page->cp_type == CPT_CACHEABLE);
if (likely(!PageUptodate(vmpage))) {
cl_page_assume(env, io, page);
+
result = ll_io_read_page(env, io, page, file);
} else {
/* Page from a non-object file. */
@@ -1283,28 +1284,3 @@ int ll_readpage(struct file *file, struct page *vmpage)
}
return result;
}
-
-int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, enum cl_req_type crt)
-{
- struct cl_2queue *queue;
- int result;
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- queue = &io->ci_queue;
- cl_2queue_init_page(queue, page);
-
- result = cl_io_submit_sync(env, io, crt, queue, 0);
- LASSERT(cl_page_is_owned(page, io));
-
- if (crt == CRT_READ)
- /*
- * in CRT_WRITE case page is left locked even in case of
- * error.
- */
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_2queue_fini(env, queue);
-
- return result;
-}
@@ -383,9 +383,11 @@ static ssize_t ll_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
/**
* Prepare partially written-to page for a write.
+ * @pg is owned when passed in and disowned when a non-zero result is
+ * returned to the caller.
*/
static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg)
+ struct cl_page *pg, struct file *file)
{
struct cl_attr *attr = vvp_env_thread_attr(env);
struct cl_object *obj = io->ci_obj;
@@ -396,23 +398,48 @@ static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
cl_object_attr_lock(obj);
result = cl_object_attr_get(env, obj, attr);
cl_object_attr_unlock(obj);
- if (result == 0) {
- /*
- * If are writing to a new page, no need to read old data.
- * The extent locking will have updated the KMS, and for our
- * purposes here we can treat it like i_size.
- */
- if (attr->cat_kms <= offset) {
- char *kaddr = kmap_atomic(vpg->vpg_page);
+ if (result) {
+ cl_page_disown(env, io, pg);
+ goto out;
+ }
- memset(kaddr, 0, cl_page_size(obj));
- kunmap_atomic(kaddr);
- } else if (vpg->vpg_defer_uptodate) {
- vpg->vpg_ra_used = 1;
- } else {
- result = ll_page_sync_io(env, io, pg, CRT_READ);
+ /*
+	 * If we are writing to a new page, no need to read old data.
+ * The extent locking will have updated the KMS, and for our
+ * purposes here we can treat it like i_size.
+ */
+ if (attr->cat_kms <= offset) {
+ char *kaddr = kmap_atomic(vpg->vpg_page);
+
+ memset(kaddr, 0, cl_page_size(obj));
+ kunmap_atomic(kaddr);
+ result = 0;
+ goto out;
+ }
+
+ if (vpg->vpg_defer_uptodate) {
+ vpg->vpg_ra_used = 1;
+ result = 0;
+ goto out;
+ }
+
+ result = ll_io_read_page(env, io, pg, file);
+ if (result)
+ goto out;
+
+ /* ll_io_read_page() disowns the page */
+ result = cl_page_own(env, io, pg);
+ if (!result) {
+ if (!PageUptodate(cl_page_vmpage(pg))) {
+ cl_page_disown(env, io, pg);
+ result = -EIO;
}
+ } else if (result == -ENOENT) {
+ /* page was truncated */
+ result = -EAGAIN;
}
+
+out:
return result;
}
@@ -452,7 +479,7 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
result = -EBUSY;
goto out;
}
-
+again:
/* To avoid deadlock, try to lock page first. */
vmpage = grab_cache_page_nowait(mapping, index);
if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) {
@@ -509,13 +536,19 @@ static int ll_write_begin(struct file *file, struct address_space *mapping,
* is a lockless IO. In that case, it's not necessary
* to read the data.
*/
- result = ll_prepare_partial_page(env, io, page);
- if (result == 0)
- SetPageUptodate(vmpage);
+ result = ll_prepare_partial_page(env, io, page, file);
+ if (result) {
+ /* vmpage should have been unlocked */
+ put_page(vmpage);
+ vmpage = NULL;
+
+ if (result == -EAGAIN)
+ goto again;
+
+ goto out;
+ }
}
}
- if (result < 0)
- cl_page_unassume(env, io, page);
out:
if (result < 0) {
if (vmpage) {