Message ID | 20221129232813.37968-2-kch@nvidia.com (mailing list archive)
---|---
State | New, archived
Series | null_blk: allow REQ_OP_WRITE_ZEROES and cleanup
On Nov 29, 2022 / 15:28, Chaitanya Kulkarni wrote:
> Add a helper function to enable the REQ_OP_WRITE_ZEROES operations
> in null_blk.
>
> Since write-zeroes is a non-trivial I/O operation we need this to
> add a blktest so we can test the non-trivial I/O path from the
> application to the block layer.
>
> Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>

This motivation sounds good. I tried this patch. With a quick test it looks
to be working well for me. Please find minor comments in line.

[...]

> +static void null_zero_sector(struct nullb_device *d, sector_t sect,
> +			     sector_t nr_sects, bool cache)
> +{
> +	struct radix_tree_root *root = cache ? &d->cache : &d->data;
> +	struct nullb_page *t_page;
> +	unsigned int offset;
> +	void *dest;
> +
> +	t_page = radix_tree_lookup(root, sect >> PAGE_SECTORS_SHIFT);
> +	if (!t_page)
> +		return;
> +
> +	offset = (sect & SECTOR_MASK) << SECTOR_SHIFT;
> +	dest = kmap_atomic(t_page->page);
> +	memset(dest + offset, 0, SECTOR_SIZE * nr_sects);
> +	kunmap_atomic(dest);
> +}

Did you consider calling null_lookup_page() or __null_lookup_page() from
null_zero_sector()? It may simplify this function a bit.

> +
>  static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
>  	struct nullb_page *t_page, bool is_cache)
>  {
> @@ -1186,6 +1211,27 @@ blk_status_t null_handle_discard(struct nullb_device *dev,
>  	return BLK_STS_OK;
>  }
>
> +static blk_status_t null_handle_write_zeroes(struct nullb_device *dev,
> +					     sector_t sector, sector_t nr_sectors)
> +{
> +	unsigned int bytes_left = nr_sectors << 9;
> +	struct nullb *nullb = dev->nullb;
> +	size_t curr_bytes;
> +
> +	spin_lock_irq(&nullb->lock);
> +	while (bytes_left > 0) {
> +		curr_bytes = min_t(size_t, bytes_left, nullb->dev->blocksize);
> +		nr_sectors = curr_bytes >> SECTOR_SHIFT;
> +		null_zero_sector(nullb->dev, sector, nr_sectors, false);
> +		if (null_cache_active(nullb))
> +			null_zero_sector(nullb->dev, sector, nr_sectors, true);
> +		sector += nr_sectors;
> +		bytes_left -= curr_bytes;
> +	}
> +	spin_unlock_irq(&nullb->lock);
> +	return BLK_STS_OK;
> +}
> +
>  static int null_handle_flush(struct nullb *nullb)
>  {
>  	int err;
> @@ -1352,6 +1398,9 @@ static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
>  	if (op == REQ_OP_DISCARD)
>  		return null_handle_discard(dev, sector, nr_sectors);
>
> +	if (op == REQ_OP_WRITE_ZEROES)
> +		return null_handle_write_zeroes(dev, sector, nr_sectors);
> +
>  	if (dev->queue_mode == NULL_Q_BIO)
>  		err = null_handle_bio(cmd);
>  	else
> @@ -1800,6 +1849,13 @@ static void null_config_discard(struct nullb *nullb)
>  	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
>  }
>
> +static void null_config_write_zeroes(struct nullb *nullb)
> +{
> +	if (!nullb->dev->write_zeroes)
> +		return;
> +	blk_queue_max_write_zeroes_sectors(nullb->q, UINT_MAX >> 9);

Just a comment: this value UINT_MAX >> 9 sounds a bit weird, but probably ok. This
value was introduced by commit 306eb6b4ad4f ("nullb: support discard") to call
blk_queue_max_discard_sectors(). I guess you chose the same value for write
zeroes.

> +}
> +
>  static const struct block_device_operations null_bio_ops = {
>  	.owner		= THIS_MODULE,
>  	.submit_bio	= null_submit_bio,
> @@ -2111,6 +2167,7 @@ static int null_add_dev(struct nullb_device *dev)
>  		blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
>
>  	null_config_discard(nullb);
> +	null_config_write_zeroes(nullb);
>
>  	if (config_item_name(&dev->item)) {
>  		/* Use configfs dir name as the device name */
> diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
> index 94ff68052b1e..2c0c9c29158f 100644
> --- a/drivers/block/null_blk/null_blk.h
> +++ b/drivers/block/null_blk/null_blk.h
> @@ -111,6 +111,7 @@ struct nullb_device {
>  	bool power; /* power on/off the device */
>  	bool memory_backed; /* if data is stored in memory */
>  	bool discard; /* if support discard */
> +	bool write_zeroes; /* if support write_zeroes */
>  	bool zoned; /* if device is zoned */
>  	bool virt_boundary; /* virtual boundary on/off for the device */
>  	bool no_sched; /* no IO scheduler for the device */
> --
> 2.29.0
>
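The suggestion above about reusing the page-lookup helpers would look roughly like the sketch below. This is an editorial illustration, not part of the posted patch: it assumes __null_lookup_page() keeps its current (nullb, sector, for_write, is_cache) signature, and it passes for_write = true so that, like the original helper, the sector is zeroed whenever a backing page is allocated. Because the lookup helpers take a struct nullb, the first parameter changes from struct nullb_device to struct nullb.

```c
/*
 * Sketch only: what null_zero_sector() might look like when delegating the
 * radix tree lookup to the existing __null_lookup_page() helper.
 */
static void null_zero_sector(struct nullb *nullb, sector_t sect,
			     sector_t nr_sects, bool cache)
{
	struct nullb_page *t_page;
	unsigned int offset;
	void *dest;

	/* for_write = true: return the page whenever it is allocated. */
	t_page = __null_lookup_page(nullb, sect, true, cache);
	if (!t_page)
		return;

	offset = (sect & SECTOR_MASK) << SECTOR_SHIFT;
	dest = kmap_atomic(t_page->page);
	memset(dest + offset, 0, SECTOR_SIZE * nr_sects);
	kunmap_atomic(dest);
}
```

Whether the extra indirection helps is exactly what the replies below weigh up; the author prefers the open-coded lookup because it is easier to follow while debugging.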
>> Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
>
> This motivation sounds good. I tried this patch. With a quick test it looks
> to be working well for me. Please find minor comments in line.
>
> [...]
>
>> +static void null_zero_sector(struct nullb_device *d, sector_t sect,
>> +			     sector_t nr_sects, bool cache)
>> +{
>> +	struct radix_tree_root *root = cache ? &d->cache : &d->data;
>> +	struct nullb_page *t_page;
>> +	unsigned int offset;
>> +	void *dest;
>> +
>> +	t_page = radix_tree_lookup(root, sect >> PAGE_SECTORS_SHIFT);
>> +	if (!t_page)
>> +		return;
>> +
>> +	offset = (sect & SECTOR_MASK) << SECTOR_SHIFT;
>> +	dest = kmap_atomic(t_page->page);
>> +	memset(dest + offset, 0, SECTOR_SIZE * nr_sects);
>> +	kunmap_atomic(dest);
>> +}
>
> Did you consider calling null_lookup_page() or __null_lookup_page() from
> null_zero_sector()? It may simplify this function a bit.
>

I found this clearer and easier than going over the call chain when
debugging.

>> +
>>  static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,

[...]

>>
>> +static void null_config_write_zeroes(struct nullb *nullb)
>> +{
>> +	if (!nullb->dev->write_zeroes)
>> +		return;
>> +	blk_queue_max_write_zeroes_sectors(nullb->q, UINT_MAX >> 9);
>
> Just a comment: this value UINT_MAX >> 9 sounds a bit weird, but probably ok. This
> value was introduced by commit 306eb6b4ad4f ("nullb: support discard") to call
> blk_queue_max_discard_sectors(). I guess you chose the same value for write
> zeroes.
>

Yes indeed, please have a look at the patches later in the series that allow
the user to set this value.

-ck
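(For scale, the limit being discussed, UINT_MAX >> 9, works out to 8,388,607 512-byte sectors, i.e. just under 4 GiB per request; it is the same ceiling null_config_discard() already applies to discard requests.)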
On Nov 30, 2022 / 23:29, Chaitanya Kulkarni wrote:
> >> Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
> >
> > This motivation sounds good. I tried this patch. With a quick test it looks
> > to be working well for me. Please find minor comments in line.
> >
> > [...]
> >
> >> +static void null_zero_sector(struct nullb_device *d, sector_t sect,
> >> +			     sector_t nr_sects, bool cache)
> >> +{
> >> +	struct radix_tree_root *root = cache ? &d->cache : &d->data;
> >> +	struct nullb_page *t_page;
> >> +	unsigned int offset;
> >> +	void *dest;
> >> +
> >> +	t_page = radix_tree_lookup(root, sect >> PAGE_SECTORS_SHIFT);
> >> +	if (!t_page)
> >> +		return;
> >> +
> >> +	offset = (sect & SECTOR_MASK) << SECTOR_SHIFT;
> >> +	dest = kmap_atomic(t_page->page);
> >> +	memset(dest + offset, 0, SECTOR_SIZE * nr_sects);
> >> +	kunmap_atomic(dest);
> >> +}
> >
> > Did you consider calling null_lookup_page() or __null_lookup_page() from
> > null_zero_sector()? It may simplify this function a bit.
> >
>
> I found this clearer and easier than going over the call chain when
> debugging.

Okay, then the patch looks good to me :)

Reviewed-by: Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c
index 1f154f92f4c2..2d592b4eb815 100644
--- a/drivers/block/null_blk/main.c
+++ b/drivers/block/null_blk/main.c
@@ -209,6 +209,10 @@ static bool g_discard;
 module_param_named(discard, g_discard, bool, 0444);
 MODULE_PARM_DESC(discard, "Support discard operations (requires memory-backed null_blk device). Default: false");
 
+static bool g_write_zeroes;
+module_param_named(write_zeroes, g_write_zeroes, bool, 0444);
+MODULE_PARM_DESC(write_zeroes, "Support write-zeores operations. Default: false");
+
 static unsigned long g_cache_size;
 module_param_named(cache_size, g_cache_size, ulong, 0444);
 MODULE_PARM_DESC(mbps, "Cache size in MiB for memory-backed device. Default: 0 (none)");
@@ -416,6 +420,7 @@ NULLB_DEVICE_ATTR(blocking, bool, NULL);
 NULLB_DEVICE_ATTR(use_per_node_hctx, bool, NULL);
 NULLB_DEVICE_ATTR(memory_backed, bool, NULL);
 NULLB_DEVICE_ATTR(discard, bool, NULL);
+NULLB_DEVICE_ATTR(write_zeroes, bool, NULL);
 NULLB_DEVICE_ATTR(mbps, uint, NULL);
 NULLB_DEVICE_ATTR(cache_size, ulong, NULL);
 NULLB_DEVICE_ATTR(zoned, bool, NULL);
@@ -540,6 +545,7 @@ static struct configfs_attribute *nullb_device_attrs[] = {
 	&nullb_device_attr_power,
 	&nullb_device_attr_memory_backed,
 	&nullb_device_attr_discard,
+	&nullb_device_attr_write_zeroes,
 	&nullb_device_attr_mbps,
 	&nullb_device_attr_cache_size,
 	&nullb_device_attr_badblocks,
@@ -614,7 +620,7 @@ static ssize_t memb_group_features_show(struct config_item *item, char *page)
 			"poll_queues,power,queue_mode,shared_tag_bitmap,size,"
 			"submit_queues,use_per_node_hctx,virt_boundary,zoned,"
 			"zone_capacity,zone_max_active,zone_max_open,"
-			"zone_nr_conv,zone_size\n");
+			"zone_nr_conv,zone_size,write_zeroes\n");
 }
 
 CONFIGFS_ATTR_RO(memb_group_, features);
@@ -678,6 +684,7 @@ static struct nullb_device *null_alloc_dev(void)
 	dev->blocking = g_blocking;
 	dev->memory_backed = g_memory_backed;
 	dev->discard = g_discard;
+	dev->write_zeroes = g_write_zeroes;
 	dev->cache_size = g_cache_size;
 	dev->mbps = g_mbps;
 	dev->use_per_node_hctx = g_use_per_node_hctx;
@@ -870,6 +877,24 @@ static void null_free_sector(struct nullb *nullb, sector_t sector,
 	}
 }
 
+static void null_zero_sector(struct nullb_device *d, sector_t sect,
+			     sector_t nr_sects, bool cache)
+{
+	struct radix_tree_root *root = cache ? &d->cache : &d->data;
+	struct nullb_page *t_page;
+	unsigned int offset;
+	void *dest;
+
+	t_page = radix_tree_lookup(root, sect >> PAGE_SECTORS_SHIFT);
+	if (!t_page)
+		return;
+
+	offset = (sect & SECTOR_MASK) << SECTOR_SHIFT;
+	dest = kmap_atomic(t_page->page);
+	memset(dest + offset, 0, SECTOR_SIZE * nr_sects);
+	kunmap_atomic(dest);
+}
+
 static struct nullb_page *null_radix_tree_insert(struct nullb *nullb, u64 idx,
 	struct nullb_page *t_page, bool is_cache)
 {
@@ -1186,6 +1211,27 @@ blk_status_t null_handle_discard(struct nullb_device *dev,
 	return BLK_STS_OK;
 }
 
+static blk_status_t null_handle_write_zeroes(struct nullb_device *dev,
+					     sector_t sector, sector_t nr_sectors)
+{
+	unsigned int bytes_left = nr_sectors << 9;
+	struct nullb *nullb = dev->nullb;
+	size_t curr_bytes;
+
+	spin_lock_irq(&nullb->lock);
+	while (bytes_left > 0) {
+		curr_bytes = min_t(size_t, bytes_left, nullb->dev->blocksize);
+		nr_sectors = curr_bytes >> SECTOR_SHIFT;
+		null_zero_sector(nullb->dev, sector, nr_sectors, false);
+		if (null_cache_active(nullb))
+			null_zero_sector(nullb->dev, sector, nr_sectors, true);
+		sector += nr_sectors;
+		bytes_left -= curr_bytes;
+	}
+	spin_unlock_irq(&nullb->lock);
+	return BLK_STS_OK;
+}
+
 static int null_handle_flush(struct nullb *nullb)
 {
 	int err;
@@ -1352,6 +1398,9 @@ static inline blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd,
 	if (op == REQ_OP_DISCARD)
 		return null_handle_discard(dev, sector, nr_sectors);
 
+	if (op == REQ_OP_WRITE_ZEROES)
+		return null_handle_write_zeroes(dev, sector, nr_sectors);
+
 	if (dev->queue_mode == NULL_Q_BIO)
 		err = null_handle_bio(cmd);
 	else
@@ -1800,6 +1849,13 @@ static void null_config_discard(struct nullb *nullb)
 	blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
 }
 
+static void null_config_write_zeroes(struct nullb *nullb)
+{
+	if (!nullb->dev->write_zeroes)
+		return;
+	blk_queue_max_write_zeroes_sectors(nullb->q, UINT_MAX >> 9);
+}
+
 static const struct block_device_operations null_bio_ops = {
 	.owner		= THIS_MODULE,
 	.submit_bio	= null_submit_bio,
@@ -2111,6 +2167,7 @@ static int null_add_dev(struct nullb_device *dev)
 		blk_queue_virt_boundary(nullb->q, PAGE_SIZE - 1);
 
 	null_config_discard(nullb);
+	null_config_write_zeroes(nullb);
 
 	if (config_item_name(&dev->item)) {
 		/* Use configfs dir name as the device name */
diff --git a/drivers/block/null_blk/null_blk.h b/drivers/block/null_blk/null_blk.h
index 94ff68052b1e..2c0c9c29158f 100644
--- a/drivers/block/null_blk/null_blk.h
+++ b/drivers/block/null_blk/null_blk.h
@@ -111,6 +111,7 @@ struct nullb_device {
 	bool power; /* power on/off the device */
 	bool memory_backed; /* if data is stored in memory */
 	bool discard; /* if support discard */
+	bool write_zeroes; /* if support write_zeroes */
 	bool zoned; /* if device is zoned */
 	bool virt_boundary; /* virtual boundary on/off for the device */
 	bool no_sched; /* no IO scheduler for the device */
Add a helper function to enable the REQ_OP_WRITE_ZEROES operations
in null_blk.

Since write-zeroes is a non-trivial I/O operation we need this to
add a blktest so we can test the non-trivial I/O path from the
application to the block layer.

Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
 drivers/block/null_blk/main.c     | 59 ++++++++++++++++++++++++++++++-
 drivers/block/null_blk/null_blk.h |  1 +
 2 files changed, 59 insertions(+), 1 deletion(-)
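Since the stated goal is exercising the non-trivial write-zeroes path from an application down to the block layer, here is a minimal userspace sketch of the kind of I/O a test would issue. The device node /dev/nullb0 and the 1 MiB range are illustrative only; the sketch assumes a memory-backed null_blk instance created with write_zeroes enabled (via the module parameter or the configfs attribute added by this patch).

```c
#include <fcntl.h>
#include <linux/fs.h>		/* BLKZEROOUT */
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical test device; set it up before running this. */
	const char *dev = "/dev/nullb0";
	/* Byte offset and length to zero; must be logical-block aligned. */
	uint64_t range[2] = { 0, 1024 * 1024 };
	int fd = open(dev, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Ask the block layer to zero the range. */
	if (ioctl(fd, BLKZEROOUT, range) < 0) {
		perror("ioctl(BLKZEROOUT)");
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}
```

BLKZEROOUT is serviced by blkdev_issue_zeroout(), which in kernels of this vintage uses REQ_OP_WRITE_ZEROES when the queue advertises a non-zero write-zeroes limit and otherwise falls back to writing zero pages, so running the same program with and without the new write_zeroes attribute is a quick way to compare the two paths.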