@@ -1831,7 +1831,6 @@ int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
struct cl_2queue *queue = NULL;
struct cl_sync_io *anchor = NULL;
bool holdinglock = false;
- bool lockedbymyself = true;
int rc;
env = cl_env_get(&refcheck);
@@ -1888,8 +1887,10 @@ int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
if (!PageUptodate(vmpage) && !PageDirty(vmpage) &&
!PageWriteback(vmpage)) {
/* read page */
- /* set PagePrivate2 to detect special case of empty page
- * in osc_brw_fini_request()
+ /* Set PagePrivate2 to detect special case of empty page
+ * in osc_brw_fini_request().
+ * It is also used to tell ll_io_read_page() that we do not
+ * want the vmpage to be unlocked.
*/
SetPagePrivate2(vmpage);
rc = ll_io_read_page(env, io, clpage, NULL);
@@ -1900,17 +1901,18 @@ int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
* file, we must not zero and write as below. Subsequent
* server-side truncate will handle things correctly.
*/
+ cl_page_unassume(env, io, clpage);
rc = 0;
goto clpfini;
}
ClearPagePrivate2(vmpage);
if (rc)
goto clpfini;
- lockedbymyself = trylock_page(vmpage);
- cl_page_assume(env, io, clpage);
}
- /* zero range in page */
+ /* Thanks to the PagePrivate2 flag, ll_io_read_page() did not unlock
+ * the vmpage, so we can safely proceed and zero the range in the page.
+ */
zero_user(vmpage, offset, len);
if (holdinglock && clpage) {
@@ -1940,10 +1942,8 @@ int ll_io_zero_page(struct inode *inode, pgoff_t index, pgoff_t offset,
if (clpage)
cl_page_put(env, clpage);
pagefini:
- if (lockedbymyself) {
- unlock_page(vmpage);
- put_page(vmpage);
- }
+ unlock_page(vmpage);
+ put_page(vmpage);
rellock:
if (holdinglock)
cl_lock_release(env, lock);
@@ -1630,18 +1630,24 @@ int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
struct vvp_io *vio = vvp_env_io(env);
bool mmap = !vio->vui_ra_valid;
struct cl_sync_io *anchor = NULL;
+ bool unlockpage = true, uptodate;
pgoff_t ra_start_index = 0;
pgoff_t io_start_index;
pgoff_t io_end_index;
int rc = 0, rc2 = 0;
struct vvp_page *vpg;
- bool uptodate;
if (file) {
fd = file->private_data;
ras = &fd->fd_ras;
}
+ /* PagePrivate2 is set in ll_io_zero_page() to tell us the vmpage
+ * must not be unlocked after processing.
+ */
+ if (page->cp_vmpage && PagePrivate2(page->cp_vmpage))
+ unlockpage = false;
+
vpg = cl2vvp_page(cl_object_page_slice(page->cp_obj, page));
uptodate = vpg->vpg_defer_uptodate;
@@ -1721,7 +1727,8 @@ int ll_io_read_page(const struct lu_env *env, struct cl_io *io,
*/
cl_page_discard(env, io, page);
}
- cl_page_disown(env, io, page);
+ if (unlockpage)
+ cl_page_disown(env, io, page);
}
/* TODO: discard all pages until page reinit route is implemented */
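
For review purposes, here is a minimal userspace sketch (not kernel code) of the handshake this patch introduces: the caller sets a flag on the page before invoking the read helper, and the helper skips its usual unlock when the flag is set, so the caller keeps ownership of the lock across the zeroing step. All identifiers below (demo_page, DEMO_PRIVATE2, demo_read_page, demo_zero_page) are hypothetical stand-ins; only the pattern mirrors ll_io_zero_page()/ll_io_read_page().

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DEMO_PRIVATE2 0x1	/* stands in for PagePrivate2 */

struct demo_page {
	pthread_mutex_t lock;	/* stands in for the vmpage lock */
	unsigned int flags;
	char data[64];
};

/* Helper normally unlocks the page when done, unless the caller asked
 * to keep it locked via DEMO_PRIVATE2 (as ll_io_read_page() now does
 * for PagePrivate2).
 */
static void demo_read_page(struct demo_page *pg)
{
	bool unlockpage = !(pg->flags & DEMO_PRIVATE2);

	memset(pg->data, 'A', sizeof(pg->data));	/* "read" the page */

	if (unlockpage)
		pthread_mutex_unlock(&pg->lock);
}

/* Caller: lock, set the flag, read, then zero a range while still
 * holding the lock, and finally unlock exactly once.
 */
static void demo_zero_page(struct demo_page *pg, size_t off, size_t len)
{
	pthread_mutex_lock(&pg->lock);
	pg->flags |= DEMO_PRIVATE2;	/* tell helper: do not unlock */
	demo_read_page(pg);
	pg->flags &= ~DEMO_PRIVATE2;
	memset(pg->data + off, 0, len);	/* zero range in page */
	pthread_mutex_unlock(&pg->lock);
}

int main(void)
{
	struct demo_page pg = { .lock = PTHREAD_MUTEX_INITIALIZER };

	demo_zero_page(&pg, 8, 16);
	printf("data[8] after zeroing: %d\n", pg.data[8]);
	return 0;
}

Compared with the old trylock_page()-based guess about who still holds the page lock, the explicit flag makes lock ownership deterministic: the caller always unlocks exactly once in the pagefini path, which is what lets the patch drop lockedbymyself entirely.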