diff mbox series

[4/8] null_blk: allow memory-backed write-zeroes-req

Message ID 20190711175328.16430-5-chaitanya.kulkarni@wdc.com (mailing list archive)
State New, archived
Headers show
Series null_blk: add missing write-zeroes and discard support | expand

Commit Message

Chaitanya Kulkarni July 11, 2019, 5:53 p.m. UTC
This patch adds support for memory-backed REQ_OP_WRITE_ZEROES
operations in the null_blk request mode. We introduce two new
functions that zero out the target sector(s) using memset, since
write-zeroes requests carry no data payload.

Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
---
 drivers/block/null_blk_main.c | 45 ++++++++++++++++++++++++++++++++++-
 1 file changed, 44 insertions(+), 1 deletion(-)

Comments

Minwoo Im July 28, 2019, 6:11 a.m. UTC | #1
On 19-07-11 10:53:24, Chaitanya Kulkarni wrote:
> This patch adds support for memory backed REQ_OP_WRITE_ZEROES
> operations for the null_blk request mode. We introduce two new
> functions where we zeroout the sector(s) using memset which are part
> of the payloadless write-zeroes request.
> 
> Signed-off-by: Chaitanya Kulkarni <chaitanya.kulkarni@wdc.com>
> ---
>  drivers/block/null_blk_main.c | 45 ++++++++++++++++++++++++++++++++++-
>  1 file changed, 44 insertions(+), 1 deletion(-)
> 
> diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
> index 65da7c2d93b9..fca011a05277 100644
> --- a/drivers/block/null_blk_main.c
> +++ b/drivers/block/null_blk_main.c
> @@ -725,6 +725,24 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
>  	}
>  }
>  
> +static void null_zero_sector(struct nullb_device *d, sector_t sect,
> +			     sector_t nr_sects, bool cache)
> +{
> +	struct radix_tree_root *root = cache ? &d->cache : &d->data;
> +	struct nullb_page *t_page;
> +	unsigned int offset;
> +	void *dest;
> +
> +	t_page = radix_tree_lookup(root, sect >> PAGE_SECTORS_SHIFT);
> +	if (!t_page)
> +		return;
> +
> +	offset = (sect & SECTOR_MASK) << SECTOR_SHIFT;
> +	dest = kmap_atomic(t_page->page);
> +	memset(dest + offset, 0, SECTOR_SIZE * nr_sects);
> +	kunmap_atomic(dest);
> +}
> +
>  static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
>  	struct nullb_page *t_page, bool is_cache)
>  {
> @@ -1026,6 +1044,25 @@ static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
>  	spin_unlock_irq(&nullb->lock);
>  }
>  
> +static void null_handle_write_zeroes(struct nullb *nullb, sector_t sector,
> +				     unsigned int bytes_left)
> +{
> +	sector_t nr_sectors;
> +	size_t curr_bytes;
> +
> +	spin_lock_irq(&nullb->lock);
> +	while (bytes_left > 0) {

Hi Chaitanya,

Thanks for your support for this!

I have a simple question here.  Is there any recommended rule about
modifying a function's own argument inside the function body, as is
done with _bytes_left_ here?  I'm not against it, but I'd like to know
whether decrementing it in place is acceptable from a code-style
point of view.

Thanks!
diff mbox series

Patch

diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 65da7c2d93b9..fca011a05277 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -725,6 +725,24 @@  static void null_free_sector(struct nullb *nullb, sector_t sector,
 	}
 }
 
+/*
+ * Zero @nr_sects sectors starting at @sect in the memory backing store.
+ * @cache selects which radix tree to operate on: the write-back cache
+ * tree (&d->cache) or the main data tree (&d->data).
+ */
+static void null_zero_sector(struct nullb_device *d, sector_t sect,
+			     sector_t nr_sects, bool cache)
+{
+	struct radix_tree_root *root = cache ? &d->cache : &d->data;
+	struct nullb_page *t_page;
+	unsigned int offset;
+	void *dest;
+
+	/* Sectors never written have no backing page; nothing to zero. */
+	t_page = radix_tree_lookup(root, sect >> PAGE_SECTORS_SHIFT);
+	if (!t_page)
+		return;
+
+	/* Byte offset of @sect within its backing page. */
+	offset = (sect & SECTOR_MASK) << SECTOR_SHIFT;
+	dest = kmap_atomic(t_page->page);
+	/*
+	 * NOTE(review): assumes offset + nr_sects * SECTOR_SIZE <= PAGE_SIZE,
+	 * i.e. the range never crosses a page boundary. The caller guarantees
+	 * this by chunking requests to at most dev->blocksize per call --
+	 * confirm blocksize <= PAGE_SIZE and sector alignment hold.
+	 */
+	memset(dest + offset, 0, SECTOR_SIZE * nr_sects);
+	kunmap_atomic(dest);
+}
+
 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
 	struct nullb_page *t_page, bool is_cache)
 {
@@ -1026,6 +1044,25 @@  static void null_handle_discard(struct nullb *nullb, sector_t sector, size_t n)
 	spin_unlock_irq(&nullb->lock);
 }
 
+/*
+ * Handle a payloadless REQ_OP_WRITE_ZEROES request by zeroing
+ * @bytes_left bytes of backing memory starting at @sector, one
+ * blocksize-sized chunk at a time under nullb->lock.
+ */
+static void null_handle_write_zeroes(struct nullb *nullb, sector_t sector,
+				     unsigned int bytes_left)
+{
+	sector_t nr_sectors;
+	size_t curr_bytes;
+
+	spin_lock_irq(&nullb->lock);
+	while (bytes_left > 0) {
+		/* Clamp each pass to one logical block's worth of sectors. */
+		curr_bytes = min_t(size_t, bytes_left, nullb->dev->blocksize);
+		nr_sectors = curr_bytes >> SECTOR_SHIFT;
+		null_zero_sector(nullb->dev, sector, nr_sectors, false);
+		/* Also zero the cached copy so it cannot resurrect old data. */
+		if (null_cache_active(nullb))
+			null_zero_sector(nullb->dev, sector, nr_sectors, true);
+		sector += nr_sectors;
+		bytes_left -= curr_bytes;
+	}
+	spin_unlock_irq(&nullb->lock);
+}
+
 static int null_handle_flush(struct nullb *nullb)
 {
 	int err;
@@ -1075,9 +1112,15 @@  static int null_handle_rq(struct nullb_cmd *cmd)
 
 	sector = blk_rq_pos(rq);
 
-	if (req_op(rq) == REQ_OP_DISCARD) {
+	switch (req_op(rq)) {
+	case REQ_OP_DISCARD:
 		null_handle_discard(nullb, sector, blk_rq_bytes(rq));
 		return 0;
+	case REQ_OP_WRITE_ZEROES:
+		null_handle_write_zeroes(nullb, sector, blk_rq_bytes(rq));
+		return 0;
+	default:
+		break;
 	}
 
 	spin_lock_irq(&nullb->lock);