
[for-next,v3,4/5] null_blk: pass transfer size to null_handle_rq()

Message ID 20250115042910.1149966-5-shinichiro.kawasaki@wdc.com
State New
Series null_blk: improve write failure simulation

Commit Message

Shinichiro Kawasaki Jan. 15, 2025, 4:29 a.m. UTC
As preparation to support partial data transfer, add a new argument to
null_handle_rq() to pass the number of sectors to transfer. This commit
does not change the behavior.

Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
---
 drivers/block/null_blk/main.c | 15 +++++++++++++--
 1 file changed, 13 insertions(+), 2 deletions(-)
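
The capping logic this patch introduces can be exercised in isolation. Below
is a minimal, self-contained userspace C sketch of the same per-segment
clamping that the patch adds to null_handle_rq(). The names (struct segment,
cap_transfer) are hypothetical and not from the driver, which instead
iterates bio segments with rq_for_each_segment() under nullb->lock.

#include <stdio.h>

#define SECTOR_SHIFT	9

struct segment {
	unsigned int len;	/* segment length in bytes */
};

/*
 * Clamp per-segment lengths so the total never exceeds nr_sectors worth of
 * data, mirroring the max_bytes/transferred_bytes logic in the patch.
 */
static unsigned int cap_transfer(const struct segment *segs, int nr_segs,
				 unsigned long long nr_sectors)
{
	unsigned int max_bytes = nr_sectors << SECTOR_SHIFT;
	unsigned int transferred_bytes = 0;
	int i;

	for (i = 0; i < nr_segs; i++) {
		unsigned int len = segs[i].len;

		if (transferred_bytes + len > max_bytes)
			len = max_bytes - transferred_bytes;

		/* ... the driver would transfer 'len' bytes here ... */

		transferred_bytes += len;
		if (transferred_bytes >= max_bytes)
			break;
	}

	return transferred_bytes;
}

int main(void)
{
	struct segment segs[3] = { { 4096 }, { 4096 }, { 4096 } };

	/* A 12 KiB request capped at 9 sectors transfers 4608 bytes. */
	printf("%u\n", cap_transfer(segs, 3, 9));
	return 0;
}

In this example the first segment transfers in full and the second is clamped
to 512 bytes, the kind of partial data transfer this patch prepares for.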

Comments

Damien Le Moal Jan. 17, 2025, 11:05 p.m. UTC | #1
On 1/15/25 1:29 PM, Shin'ichiro Kawasaki wrote:
> As preparation to support partial data transfer, add a new argument to
> null_handle_rq() to pass the number of sectors to transfer. This commit
> does not change the behavior.
> 
> Signed-off-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>

Looks good. As a nit, I would suggest renaming null_handle_rq() to the less
generic name null_handle_rw(), or maybe null_handle_data_transfer().

Regardless,

Reviewed-by: Damien Le Moal <dlemoal@kernel.org>

Patch

diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 87037cb375c9..71c86775354e 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -1263,25 +1263,36 @@ static int null_transfer(struct nullb *nullb, struct page *page,
 	return err;
 }
 
-static blk_status_t null_handle_rq(struct nullb_cmd *cmd)
+/*
+ * Transfer data for the given request. The transfer size is capped by the
+ * nr_sectors argument.
+ */
+static blk_status_t null_handle_rq(struct nullb_cmd *cmd, sector_t nr_sectors)
 {
 	struct request *rq = blk_mq_rq_from_pdu(cmd);
 	struct nullb *nullb = cmd->nq->dev->nullb;
 	int err = 0;
 	unsigned int len;
 	sector_t sector = blk_rq_pos(rq);
+	unsigned int max_bytes = nr_sectors << SECTOR_SHIFT;
+	unsigned int transferred_bytes = 0;
 	struct req_iterator iter;
 	struct bio_vec bvec;
 
 	spin_lock_irq(&nullb->lock);
 	rq_for_each_segment(bvec, rq, iter) {
 		len = bvec.bv_len;
+		if (transferred_bytes + len > max_bytes)
+			len = max_bytes - transferred_bytes;
 		err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
 				     op_is_write(req_op(rq)), sector,
 				     rq->cmd_flags & REQ_FUA);
 		if (err)
 			break;
 		sector += len >> SECTOR_SHIFT;
+		transferred_bytes += len;
+		if (transferred_bytes >= max_bytes)
+			break;
 	}
 	spin_unlock_irq(&nullb->lock);
 
@@ -1333,7 +1344,7 @@ blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op,
 	if (op == REQ_OP_DISCARD)
 		return null_handle_discard(dev, sector, nr_sectors);
 
-	return null_handle_rq(cmd);
+	return null_handle_rq(cmd, nr_sectors);
 }
 
 static void nullb_zero_read_cmd_buffer(struct nullb_cmd *cmd)
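
To picture how the new argument might be consumed by a failure-injection
path, here is a hypothetical sketch only: null_handle_partial_write and
fail_after_sectors are invented for illustration and are not part of this
series.

/*
 * Hypothetical caller sketch: transfer only the leading part of a write,
 * then report an error if the request was truncated at the cap.
 */
static blk_status_t null_handle_partial_write(struct nullb_cmd *cmd,
					      sector_t nr_sectors,
					      sector_t fail_after_sectors)
{
	blk_status_t sts;

	/* Transfer at most fail_after_sectors worth of data. */
	sts = null_handle_rq(cmd, min(nr_sectors, fail_after_sectors));
	if (sts != BLK_STS_OK)
		return sts;

	/* Data beyond the cap was never transferred. */
	if (nr_sectors > fail_after_sectors)
		return BLK_STS_IOERR;

	return BLK_STS_OK;
}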