diff mbox series

[RFC,02/35] libceph: Rename alignment to offset

Message ID 20250313233341.1675324-3-dhowells@redhat.com (mailing list archive)
State New
Headers show
Series ceph, rbd, netfs: Make ceph fully use netfslib | expand

Commit Message

David Howells March 13, 2025, 11:32 p.m. UTC
Rename 'alignment' to 'offset' in a number of places where it seems to be
talking about the offset into the first page of a sequence of pages.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Viacheslav Dubeyko <slava@dubeyko.com>
cc: Alex Markuze <amarkuze@redhat.com>
cc: Ilya Dryomov <idryomov@gmail.com>
cc: ceph-devel@vger.kernel.org
cc: linux-fsdevel@vger.kernel.org
---
 fs/ceph/addr.c                  |  4 ++--
 include/linux/ceph/messenger.h  |  4 ++--
 include/linux/ceph/osd_client.h | 10 +++++-----
 net/ceph/messenger.c            | 10 +++++-----
 net/ceph/osd_client.c           | 24 ++++++++++++------------
 5 files changed, 26 insertions(+), 26 deletions(-)

Comments

Viacheslav Dubeyko March 14, 2025, 7:04 p.m. UTC | #1
On Thu, 2025-03-13 at 23:32 +0000, David Howells wrote:
> Rename 'alignment' to 'offset' in a number of places where it seems to be
> talking about the offset into the first page of a sequence of pages.
> 

Yeah, "offset" sounds clearer than "alignment".

> Signed-off-by: David Howells <dhowells@redhat.com>
> cc: Viacheslav Dubeyko <slava@dubeyko.com>
> cc: Alex Markuze <amarkuze@redhat.com>
> cc: Ilya Dryomov <idryomov@gmail.com>
> cc: ceph-devel@vger.kernel.org
> cc: linux-fsdevel@vger.kernel.org
> ---
>  fs/ceph/addr.c                  |  4 ++--
>  include/linux/ceph/messenger.h  |  4 ++--
>  include/linux/ceph/osd_client.h | 10 +++++-----
>  net/ceph/messenger.c            | 10 +++++-----
>  net/ceph/osd_client.c           | 24 ++++++++++++------------
>  5 files changed, 26 insertions(+), 26 deletions(-)
> 
> diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
> index 20b6bd8cd004..482a9f41a685 100644
> --- a/fs/ceph/addr.c
> +++ b/fs/ceph/addr.c
> @@ -254,7 +254,7 @@ static void finish_netfs_read(struct ceph_osd_request *req)
>  
>  	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
>  		ceph_put_page_vector(osd_data->pages,
> -				     calc_pages_for(osd_data->alignment,
> +				     calc_pages_for(osd_data->offset,
>  					osd_data->length), false);
>  	}
>  	if (err > 0) {
> @@ -918,7 +918,7 @@ static void writepages_finish(struct ceph_osd_request *req)
>  		osd_data = osd_req_op_extent_osd_data(req, i);
>  		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
>  		len += osd_data->length;
> -		num_pages = calc_pages_for((u64)osd_data->alignment,
> +		num_pages = calc_pages_for((u64)osd_data->offset,
>  					   (u64)osd_data->length);
>  		total_pages += num_pages;
>  		for (j = 0; j < num_pages; j++) {
> diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
> index 1717cc57cdac..db2aba32b8a0 100644
> --- a/include/linux/ceph/messenger.h
> +++ b/include/linux/ceph/messenger.h
> @@ -221,7 +221,7 @@ struct ceph_msg_data {
>  		struct {
>  			struct page	**pages;

Do we still operate by pages here? It looks like we need to rework it somehow.

>  			size_t		length;		/* total # bytes */
> -			unsigned int	alignment;	/* first page */
> +			unsigned int	offset;		/* first page */

Maybe, we need to change the comment on the "first folio" here?

>  			bool		own_pages;

We are mentioning pages everywhere. :)

>  		};
>  		struct ceph_pagelist	*pagelist;
> @@ -602,7 +602,7 @@ extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
>  				       unsigned long interval);
>  
>  void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
> -			     size_t length, size_t alignment, bool own_pages);
> +			     size_t length, size_t offset, bool own_pages);
>  extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
>  				struct ceph_pagelist *pagelist);
>  #ifdef CONFIG_BLOCK
> diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
> index d55b30057a45..8fc84f389aad 100644
> --- a/include/linux/ceph/osd_client.h
> +++ b/include/linux/ceph/osd_client.h
> @@ -118,7 +118,7 @@ struct ceph_osd_data {
>  		struct {
>  			struct page	**pages;

Yeah, pages, pages, pages... :)

>  			u64		length;
> -			u32		alignment;
> +			u32		offset;
>  			bool		pages_from_pool;
>  			bool		own_pages;
>  		};
> @@ -469,7 +469,7 @@ struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req,
>  extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
>  					unsigned int which,
>  					struct page **pages, u64 length,
> -					u32 alignment, bool pages_from_pool,
> +					u32 offset, bool pages_from_pool,
>  					bool own_pages);
>  
>  extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
> @@ -488,7 +488,7 @@ extern struct ceph_osd_data *osd_req_op_extent_osd_data(
>  extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
>  					unsigned int which,
>  					struct page **pages, u64 length,
> -					u32 alignment, bool pages_from_pool,
> +					u32 offset, bool pages_from_pool,
>  					bool own_pages);
>  extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *,
>  					unsigned int which,
> @@ -515,7 +515,7 @@ extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
>  extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
>  					unsigned int which,
>  					struct page **pages, u64 length,
> -					u32 alignment, bool pages_from_pool,
> +					u32 offset, bool pages_from_pool,
>  					bool own_pages);
>  void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
>  				       unsigned int which,
> @@ -524,7 +524,7 @@ void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
>  extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
>  					unsigned int which,
>  					struct page **pages, u64 length,
> -					u32 alignment, bool pages_from_pool,
> +					u32 offset, bool pages_from_pool,
>  					bool own_pages);
>  int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
>  			const char *class, const char *method);
> diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
> index d1b5705dc0c6..1df4291cc80b 100644
> --- a/net/ceph/messenger.c
> +++ b/net/ceph/messenger.c
> @@ -840,8 +840,8 @@ static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
>  	BUG_ON(!data->length);
>  
>  	cursor->resid = min(length, data->length);
> -	page_count = calc_pages_for(data->alignment, (u64)data->length);
> -	cursor->page_offset = data->alignment & ~PAGE_MASK;
> +	page_count = calc_pages_for(data->offset, (u64)data->length);
> +	cursor->page_offset = data->offset & ~PAGE_MASK;

We still have a lot of work converting to folio.

>  	cursor->page_index = 0;
>  	BUG_ON(page_count > (int)USHRT_MAX);
>  	cursor->page_count = (unsigned short)page_count;
> @@ -1873,7 +1873,7 @@ static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
>  static void ceph_msg_data_destroy(struct ceph_msg_data *data)
>  {
>  	if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
> -		int num_pages = calc_pages_for(data->alignment, data->length);
> +		int num_pages = calc_pages_for(data->offset, data->length);
>  		ceph_release_page_vector(data->pages, num_pages);
>  	} else if (data->type == CEPH_MSG_DATA_PAGELIST) {
>  		ceph_pagelist_release(data->pagelist);
> @@ -1881,7 +1881,7 @@ static void ceph_msg_data_destroy(struct ceph_msg_data *data)
>  }
>  
>  void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
> -			     size_t length, size_t alignment, bool own_pages)
> +			     size_t length, size_t offset, bool own_pages)

I think the parameter order "size_t offset, size_t length" would be more logical
here. But it's not critical at all.

>  {
>  	struct ceph_msg_data *data;
>  
> @@ -1892,7 +1892,7 @@ void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
>  	data->type = CEPH_MSG_DATA_PAGES;
>  	data->pages = pages;
>  	data->length = length;
> -	data->alignment = alignment & ~PAGE_MASK;
> +	data->offset = offset & ~PAGE_MASK;
>  	data->own_pages = own_pages;
>  
>  	msg->data_length += length;
> diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
> index b24afec24138..e359e70ad47e 100644
> --- a/net/ceph/osd_client.c
> +++ b/net/ceph/osd_client.c
> @@ -130,13 +130,13 @@ static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
>   * Consumes @pages if @own_pages is true.
>   */
>  static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
> -			struct page **pages, u64 length, u32 alignment,
> +			struct page **pages, u64 length, u32 offset,

And here too...

>  			bool pages_from_pool, bool own_pages)
>  {
>  	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
>  	osd_data->pages = pages;
>  	osd_data->length = length;
> -	osd_data->alignment = alignment;
> +	osd_data->offset = offset;
>  	osd_data->pages_from_pool = pages_from_pool;
>  	osd_data->own_pages = own_pages;
>  }
> @@ -196,26 +196,26 @@ EXPORT_SYMBOL(osd_req_op_extent_osd_data);
>  
>  void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
>  			unsigned int which, struct page **pages,
> -			u64 length, u32 alignment,
> +			u64 length, u32 offset,

Interesting... We have length of 64 bits but offset is only 32 bits. I assume
that length is in bytes, but offset is in pages. But still this difference in
types looks slightly strange.

>  			bool pages_from_pool, bool own_pages)
>  {
>  	struct ceph_osd_data *osd_data;
>  
>  	osd_data = osd_req_op_raw_data_in(osd_req, which);
> -	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
> +	ceph_osd_data_pages_init(osd_data, pages, length, offset,
>  				pages_from_pool, own_pages);
>  }
>  EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
>  
>  void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
>  			unsigned int which, struct page **pages,
> -			u64 length, u32 alignment,
> +			u64 length, u32 offset,

The same strange thing here...

>  			bool pages_from_pool, bool own_pages)
>  {
>  	struct ceph_osd_data *osd_data;
>  
>  	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
> -	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
> +	ceph_osd_data_pages_init(osd_data, pages, length, offset,
>  				pages_from_pool, own_pages);
>  }
>  EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
> @@ -312,12 +312,12 @@ EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
>  
>  void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
>  			unsigned int which, struct page **pages, u64 length,
> -			u32 alignment, bool pages_from_pool, bool own_pages)
> +			u32 offset, bool pages_from_pool, bool own_pages)
>  {
>  	struct ceph_osd_data *osd_data;
>  
>  	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
> -	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
> +	ceph_osd_data_pages_init(osd_data, pages, length, offset,
>  				pages_from_pool, own_pages);
>  	osd_req->r_ops[which].cls.indata_len += length;
>  	osd_req->r_ops[which].indata_len += length;
> @@ -344,12 +344,12 @@ EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);
>  
>  void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
>  			unsigned int which, struct page **pages, u64 length,
> -			u32 alignment, bool pages_from_pool, bool own_pages)
> +			u32 offset, bool pages_from_pool, bool own_pages)
>  {
>  	struct ceph_osd_data *osd_data;
>  
>  	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
> -	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
> +	ceph_osd_data_pages_init(osd_data, pages, length, offset,
>  				pages_from_pool, own_pages);
>  }
>  EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
> @@ -382,7 +382,7 @@ static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
>  	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
>  		int num_pages;
>  
> -		num_pages = calc_pages_for((u64)osd_data->alignment,
> +		num_pages = calc_pages_for((u64)osd_data->offset,
>  						(u64)osd_data->length);

As far as I can see, length is already u64, but offset is u32. Why don't we
have u64 for both fields? Then we wouldn't need the (u64) casts on
osd_data->length/offset here.

>  		ceph_release_page_vector(osd_data->pages, num_pages);
>  	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
> @@ -969,7 +969,7 @@ static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
>  		BUG_ON(length > (u64) SIZE_MAX);
>  		if (length)
>  			ceph_msg_data_add_pages(msg, osd_data->pages,
> -					length, osd_data->alignment, false);
> +					length, osd_data->offset, false);
>  	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
>  		BUG_ON(!length);
>  		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
> 
> 

Thanks,
Slava.
David Howells March 14, 2025, 8:01 p.m. UTC | #2
Viacheslav Dubeyko <Slava.Dubeyko@ibm.com> wrote:

> >  		struct {
> >  			struct page	**pages;
> 
> Do we still operate by pages here? It looks like we need to rework it somehow.

One of the points of these patches is to rework this, working towards reducing
everything to just an iterator where possible, using a segmented list as the
actual buffers.

One of the things hopefully to be discussed at LSF/MM is how we might combine
struct folio_queue, struct bvec[] and struct scatterlist into something that
can hold references to more general pieces of memory and not just folios - and
that might be something we can use here for handing buffers about.

Anyway, my aim is to get all references to pages and folios (as far as
possible) out of 9p, afs, cifs and ceph - delegating all of that to netfslib
for ceph (rbd is slightly different - but I've completed the transformation
there).

Netfslib will pass an iterator to each subrequest describing the buffer, and
we might need to go from there to another iterator describing a bounce buffer
for transport encryption, but from there, we should pass the iterator directly
to the socket.

Further, I would like to make it so that we can link these buffers together
such that we can fabricate an entire message within a single iterator - and
then we no longer need to cork the TCP socket.

David
diff mbox series

Patch

diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 20b6bd8cd004..482a9f41a685 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -254,7 +254,7 @@  static void finish_netfs_read(struct ceph_osd_request *req)
 
 	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
 		ceph_put_page_vector(osd_data->pages,
-				     calc_pages_for(osd_data->alignment,
+				     calc_pages_for(osd_data->offset,
 					osd_data->length), false);
 	}
 	if (err > 0) {
@@ -918,7 +918,7 @@  static void writepages_finish(struct ceph_osd_request *req)
 		osd_data = osd_req_op_extent_osd_data(req, i);
 		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
 		len += osd_data->length;
-		num_pages = calc_pages_for((u64)osd_data->alignment,
+		num_pages = calc_pages_for((u64)osd_data->offset,
 					   (u64)osd_data->length);
 		total_pages += num_pages;
 		for (j = 0; j < num_pages; j++) {
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 1717cc57cdac..db2aba32b8a0 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -221,7 +221,7 @@  struct ceph_msg_data {
 		struct {
 			struct page	**pages;
 			size_t		length;		/* total # bytes */
-			unsigned int	alignment;	/* first page */
+			unsigned int	offset;		/* first page */
 			bool		own_pages;
 		};
 		struct ceph_pagelist	*pagelist;
@@ -602,7 +602,7 @@  extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
 				       unsigned long interval);
 
 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
-			     size_t length, size_t alignment, bool own_pages);
+			     size_t length, size_t offset, bool own_pages);
 extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
 				struct ceph_pagelist *pagelist);
 #ifdef CONFIG_BLOCK
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index d55b30057a45..8fc84f389aad 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -118,7 +118,7 @@  struct ceph_osd_data {
 		struct {
 			struct page	**pages;
 			u64		length;
-			u32		alignment;
+			u32		offset;
 			bool		pages_from_pool;
 			bool		own_pages;
 		};
@@ -469,7 +469,7 @@  struct ceph_osd_req_op *osd_req_op_init(struct ceph_osd_request *osd_req,
 extern void osd_req_op_raw_data_in_pages(struct ceph_osd_request *,
 					unsigned int which,
 					struct page **pages, u64 length,
-					u32 alignment, bool pages_from_pool,
+					u32 offset, bool pages_from_pool,
 					bool own_pages);
 
 extern void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
@@ -488,7 +488,7 @@  extern struct ceph_osd_data *osd_req_op_extent_osd_data(
 extern void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *,
 					unsigned int which,
 					struct page **pages, u64 length,
-					u32 alignment, bool pages_from_pool,
+					u32 offset, bool pages_from_pool,
 					bool own_pages);
 extern void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *,
 					unsigned int which,
@@ -515,7 +515,7 @@  extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
 extern void osd_req_op_cls_request_data_pages(struct ceph_osd_request *,
 					unsigned int which,
 					struct page **pages, u64 length,
-					u32 alignment, bool pages_from_pool,
+					u32 offset, bool pages_from_pool,
 					bool own_pages);
 void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
 				       unsigned int which,
@@ -524,7 +524,7 @@  void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
 extern void osd_req_op_cls_response_data_pages(struct ceph_osd_request *,
 					unsigned int which,
 					struct page **pages, u64 length,
-					u32 alignment, bool pages_from_pool,
+					u32 offset, bool pages_from_pool,
 					bool own_pages);
 int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
 			const char *class, const char *method);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index d1b5705dc0c6..1df4291cc80b 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -840,8 +840,8 @@  static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
 	BUG_ON(!data->length);
 
 	cursor->resid = min(length, data->length);
-	page_count = calc_pages_for(data->alignment, (u64)data->length);
-	cursor->page_offset = data->alignment & ~PAGE_MASK;
+	page_count = calc_pages_for(data->offset, (u64)data->length);
+	cursor->page_offset = data->offset & ~PAGE_MASK;
 	cursor->page_index = 0;
 	BUG_ON(page_count > (int)USHRT_MAX);
 	cursor->page_count = (unsigned short)page_count;
@@ -1873,7 +1873,7 @@  static struct ceph_msg_data *ceph_msg_data_add(struct ceph_msg *msg)
 static void ceph_msg_data_destroy(struct ceph_msg_data *data)
 {
 	if (data->type == CEPH_MSG_DATA_PAGES && data->own_pages) {
-		int num_pages = calc_pages_for(data->alignment, data->length);
+		int num_pages = calc_pages_for(data->offset, data->length);
 		ceph_release_page_vector(data->pages, num_pages);
 	} else if (data->type == CEPH_MSG_DATA_PAGELIST) {
 		ceph_pagelist_release(data->pagelist);
@@ -1881,7 +1881,7 @@  static void ceph_msg_data_destroy(struct ceph_msg_data *data)
 }
 
 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
-			     size_t length, size_t alignment, bool own_pages)
+			     size_t length, size_t offset, bool own_pages)
 {
 	struct ceph_msg_data *data;
 
@@ -1892,7 +1892,7 @@  void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
 	data->type = CEPH_MSG_DATA_PAGES;
 	data->pages = pages;
 	data->length = length;
-	data->alignment = alignment & ~PAGE_MASK;
+	data->offset = offset & ~PAGE_MASK;
 	data->own_pages = own_pages;
 
 	msg->data_length += length;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index b24afec24138..e359e70ad47e 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -130,13 +130,13 @@  static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
  * Consumes @pages if @own_pages is true.
  */
 static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
-			struct page **pages, u64 length, u32 alignment,
+			struct page **pages, u64 length, u32 offset,
 			bool pages_from_pool, bool own_pages)
 {
 	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
 	osd_data->pages = pages;
 	osd_data->length = length;
-	osd_data->alignment = alignment;
+	osd_data->offset = offset;
 	osd_data->pages_from_pool = pages_from_pool;
 	osd_data->own_pages = own_pages;
 }
@@ -196,26 +196,26 @@  EXPORT_SYMBOL(osd_req_op_extent_osd_data);
 
 void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
 			unsigned int which, struct page **pages,
-			u64 length, u32 alignment,
+			u64 length, u32 offset,
 			bool pages_from_pool, bool own_pages)
 {
 	struct ceph_osd_data *osd_data;
 
 	osd_data = osd_req_op_raw_data_in(osd_req, which);
-	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
+	ceph_osd_data_pages_init(osd_data, pages, length, offset,
 				pages_from_pool, own_pages);
 }
 EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
 
 void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
 			unsigned int which, struct page **pages,
-			u64 length, u32 alignment,
+			u64 length, u32 offset,
 			bool pages_from_pool, bool own_pages)
 {
 	struct ceph_osd_data *osd_data;
 
 	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
-	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
+	ceph_osd_data_pages_init(osd_data, pages, length, offset,
 				pages_from_pool, own_pages);
 }
 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
@@ -312,12 +312,12 @@  EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
 
 void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
 			unsigned int which, struct page **pages, u64 length,
-			u32 alignment, bool pages_from_pool, bool own_pages)
+			u32 offset, bool pages_from_pool, bool own_pages)
 {
 	struct ceph_osd_data *osd_data;
 
 	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
-	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
+	ceph_osd_data_pages_init(osd_data, pages, length, offset,
 				pages_from_pool, own_pages);
 	osd_req->r_ops[which].cls.indata_len += length;
 	osd_req->r_ops[which].indata_len += length;
@@ -344,12 +344,12 @@  EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);
 
 void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
 			unsigned int which, struct page **pages, u64 length,
-			u32 alignment, bool pages_from_pool, bool own_pages)
+			u32 offset, bool pages_from_pool, bool own_pages)
 {
 	struct ceph_osd_data *osd_data;
 
 	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
-	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
+	ceph_osd_data_pages_init(osd_data, pages, length, offset,
 				pages_from_pool, own_pages);
 }
 EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
@@ -382,7 +382,7 @@  static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
 	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
 		int num_pages;
 
-		num_pages = calc_pages_for((u64)osd_data->alignment,
+		num_pages = calc_pages_for((u64)osd_data->offset,
 						(u64)osd_data->length);
 		ceph_release_page_vector(osd_data->pages, num_pages);
 	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
@@ -969,7 +969,7 @@  static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
 		BUG_ON(length > (u64) SIZE_MAX);
 		if (length)
 			ceph_msg_data_add_pages(msg, osd_data->pages,
-					length, osd_data->alignment, false);
+					length, osd_data->offset, false);
 	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
 		BUG_ON(!length);
 		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);