@@ -735,7 +735,8 @@ struct cl_page {
refcount_t cp_ref;
/** layout_entry + stripe index, composed using lov_comp_index() */
unsigned int cp_lov_index;
- pgoff_t cp_osc_index;
+ /** page->index of the page within the whole file */
+ pgoff_t cp_page_index;
/** An object this page is a part of. Immutable after creation. */
struct cl_object *cp_obj;
/** vmpage */
@@ -855,7 +855,7 @@ static inline struct osc_page *oap2osc(struct osc_async_page *oap)
static inline pgoff_t osc_index(struct osc_page *opg)
{
- return opg->ops_cl.cpl_page->cp_osc_index;
+ return opg->ops_oap.oap_obj_off >> PAGE_SHIFT;
}
static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
@@ -1213,6 +1213,9 @@ static inline void client_adjust_max_dirty(struct client_obd *cli)
1 << (20 - PAGE_SHIFT));
}
+/* Must be used for page cache pages only,
+ * not safe otherwise (e.g. direct IO pages)
+ */
static inline struct inode *page2inode(struct page *page)
{
if (page->mapping) {
@@ -440,6 +440,9 @@ int ll_file_release(struct inode *inode, struct file *file)
static inline int ll_dom_readpage(void *data, struct page *page)
{
+	/* ll_dom_readpage() is only ever called on page cache pages, so
+	 * page->mapping and the mapping host inode are guaranteed to be set
+	 */
struct inode *inode = page2inode(page);
struct niobuf_local *lnb = data;
void *kaddr;
@@ -235,9 +235,16 @@ struct cl_page *cl_page_alloc(const struct lu_env *env, struct cl_object *o,
cl_page->cp_vmpage = vmpage;
cl_page->cp_state = CPS_CACHED;
cl_page->cp_type = type;
- cl_page->cp_inode = NULL;
+ if (type == CPT_TRANSIENT)
+		/* a reference to the correct inode is set later,
+		 * in ll_direct_rw_pages()
+		 */
+ cl_page->cp_inode = NULL;
+ else
+ cl_page->cp_inode = page2inode(vmpage);
INIT_LIST_HEAD(&cl_page->cp_batch);
lu_ref_init(&cl_page->cp_reference);
+ cl_page->cp_page_index = ind;
cl_object_for_each(o2, o) {
if (o2->co_ops->coo_page_init) {
result = o2->co_ops->coo_page_init(env, o2,
@@ -266,7 +266,6 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
opg->ops_srvlock = osc_io_srvlock(oio);
cl_page_slice_add(cl_page, &opg->ops_cl, obj, &osc_page_ops);
- cl_page->cp_osc_index = index;
/* reserve an LRU space for this cl_page */
if (cl_page->cp_type == CPT_CACHEABLE) {
@@ -1417,21 +1417,13 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
struct inode *inode = NULL;
bool directio = false;
bool enable_checksum = true;
+ struct cl_page *clpage;
if (pga[0]->pg) {
- inode = page2inode(pga[0]->pg);
- if (!inode) {
- /* Try to get reference to inode from cl_page if we are
- * dealing with direct IO, as handled pages are not
- * actual page cache pages.
- */
- struct osc_async_page *oap = brw_page2oap(pga[0]);
- struct cl_page *clpage = oap2cl_page(oap);
-
- inode = clpage->cp_inode;
- if (inode)
- directio = true;
- }
+ clpage = oap2cl_page(brw_page2oap(pga[0]));
+ inode = clpage->cp_inode;
+ if (clpage->cp_type == CPT_TRANSIENT)
+ directio = true;
}
if (OBD_FAIL_CHECK(OBD_FAIL_OSC_BRW_PREP_REQ))
return -ENOMEM; /* Recoverable */
@@ -1453,11 +1445,11 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode) &&
fscrypt_has_encryption_key(inode)) {
for (i = 0; i < page_count; i++) {
- struct brw_page *pg = pga[i];
+ struct brw_page *brwpg = pga[i];
struct page *data_page = NULL;
bool retried = false;
bool lockedbymyself;
- u32 nunits = (pg->off & ~PAGE_MASK) + pg->count;
+ u32 nunits = (brwpg->off & ~PAGE_MASK) + brwpg->count;
struct address_space *map_orig = NULL;
pgoff_t index_orig;
@@ -1472,23 +1464,24 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
* end in vvp_page_completion_write/cl_page_completion,
* which means only once the page is fully processed.
*/
- lockedbymyself = trylock_page(pg->pg);
+ lockedbymyself = trylock_page(brwpg->pg);
if (directio) {
- map_orig = pg->pg->mapping;
- pg->pg->mapping = inode->i_mapping;
- index_orig = pg->pg->index;
- pg->pg->index = pg->off >> PAGE_SHIFT;
+ map_orig = brwpg->pg->mapping;
+ brwpg->pg->mapping = inode->i_mapping;
+ index_orig = brwpg->pg->index;
+ clpage = oap2cl_page(brw_page2oap(brwpg));
+ brwpg->pg->index = clpage->cp_page_index;
}
data_page =
- fscrypt_encrypt_pagecache_blocks(pg->pg,
+ fscrypt_encrypt_pagecache_blocks(brwpg->pg,
nunits, 0,
GFP_NOFS);
if (directio) {
- pg->pg->mapping = map_orig;
- pg->pg->index = index_orig;
+ brwpg->pg->mapping = map_orig;
+ brwpg->pg->index = index_orig;
}
if (lockedbymyself)
- unlock_page(pg->pg);
+ unlock_page(brwpg->pg);
if (IS_ERR(data_page)) {
rc = PTR_ERR(data_page);
if (rc == -ENOMEM && !retried) {
@@ -1503,10 +1496,11 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
* disambiguation in osc_release_bounce_pages().
*/
SetPageChecked(data_page);
- pg->pg = data_page;
+ brwpg->pg = data_page;
/* there should be no gap in the middle of page array */
if (i == page_count - 1) {
- struct osc_async_page *oap = brw_page2oap(pg);
+ struct osc_async_page *oap =
+ brw_page2oap(brwpg);
oa->o_size = oap->oap_count +
oap->oap_obj_off +
@@ -1515,10 +1509,10 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
/* len is forced to nunits, and relative offset to 0
* so store the old, clear text info
*/
- pg->bp_count_diff = nunits - pg->count;
- pg->count = nunits;
- pg->bp_off_diff = pg->off & ~PAGE_MASK;
- pg->off = pg->off & PAGE_MASK;
+ brwpg->bp_count_diff = nunits - brwpg->count;
+ brwpg->count = nunits;
+ brwpg->bp_off_diff = brwpg->off & ~PAGE_MASK;
+ brwpg->off = brwpg->off & PAGE_MASK;
}
} else if (opc == OST_WRITE && inode && IS_ENCRYPTED(inode)) {
struct osc_async_page *oap = brw_page2oap(pga[0]);
@@ -1991,8 +1985,9 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
const char *obd_name = cli->cl_import->imp_obd->obd_name;
struct ost_body *body;
u32 client_cksum = 0;
- struct inode *inode;
+ struct inode *inode = NULL;
unsigned int blockbits = 0, blocksize = 0;
+ struct cl_page *clpage;
if (rc < 0 && rc != -EDQUOT) {
DEBUG_REQ(D_INFO, req, "Failed request: rc = %d", rc);
@@ -2186,19 +2181,12 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
rc = 0;
}
- inode = page2inode(aa->aa_ppga[0]->pg);
- if (!inode) {
- /* Try to get reference to inode from cl_page if we are
- * dealing with direct IO, as handled pages are not
- * actual page cache pages.
- */
- struct osc_async_page *oap = brw_page2oap(aa->aa_ppga[0]);
-
- inode = oap2cl_page(oap)->cp_inode;
- if (inode) {
- blockbits = inode->i_blkbits;
- blocksize = 1 << blockbits;
- }
+ /* get the inode from the first cl_page */
+ clpage = oap2cl_page(brw_page2oap(aa->aa_ppga[0]));
+ inode = clpage->cp_inode;
+ if (clpage->cp_type == CPT_TRANSIENT && inode) {
+ blockbits = inode->i_blkbits;
+ blocksize = 1 << blockbits;
}
if (inode && IS_ENCRYPTED(inode)) {
int idx;
@@ -2208,19 +2196,19 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
goto out;
}
for (idx = 0; idx < aa->aa_page_count; idx++) {
- struct brw_page *pg = aa->aa_ppga[idx];
+ struct brw_page *brwpg = aa->aa_ppga[idx];
unsigned int offs = 0;
while (offs < PAGE_SIZE) {
/* do not decrypt if page is all 0s */
- if (memchr_inv(page_address(pg->pg) + offs, 0,
- LUSTRE_ENCRYPTION_UNIT_SIZE) == NULL) {
+ if (!memchr_inv(page_address(brwpg->pg) + offs, 0,
+ LUSTRE_ENCRYPTION_UNIT_SIZE)) {
/* if page is empty forward info to
* upper layers (ll_io_zero_page) by
* clearing PagePrivate2
*/
if (!offs)
- ClearPagePrivate2(pg->pg);
+ ClearPagePrivate2(brwpg->pg);
break;
}
@@ -2230,24 +2218,25 @@ static int osc_brw_fini_request(struct ptlrpc_request *req, int rc)
* input parameter. Page does not need
* to be locked.
*/
- u64 lblk_num =
- ((u64)(pg->off >> PAGE_SHIFT) <<
- (PAGE_SHIFT - blockbits)) +
- (offs >> blockbits);
+ u64 lblk_num;
unsigned int i;
+ clpage = oap2cl_page(brw_page2oap(brwpg));
+ lblk_num = ((u64)(brwpg->off >> PAGE_SHIFT) <<
+ (PAGE_SHIFT - blockbits)) +
+ (offs >> blockbits);
for (i = offs;
i < offs + LUSTRE_ENCRYPTION_UNIT_SIZE;
i += blocksize, lblk_num++) {
rc = fscrypt_decrypt_block_inplace(inode,
- pg->pg,
+ brwpg->pg,
blocksize, i,
lblk_num);
if (rc)
break;
}
} else {
- rc = fscrypt_decrypt_pagecache_blocks(pg->pg,
+ rc = fscrypt_decrypt_pagecache_blocks(brwpg->pg,
LUSTRE_ENCRYPTION_UNIT_SIZE,
offs);
}