Message ID | 20210310065003.573474-1-shinichiro.kawasaki@wdc.com (mailing list archive)
---|---
State | New, archived
Series | [v2] block: Discard page cache of zone reset target range
> 	switch (cmd) {
> 	case BLKRESETZONE:
> 		op = REQ_OP_ZONE_RESET;
> +
> +		capacity = get_capacity(bdev->bd_disk);
> +		if (zrange.sector + zrange.nr_sectors <= zrange.sector ||
> +		    zrange.sector + zrange.nr_sectors > capacity)
> +			/* Out of range */
> +			return -EINVAL;
> +
> +		start = zrange.sector << SECTOR_SHIFT;
> +		end = ((zrange.sector + zrange.nr_sectors) << SECTOR_SHIFT) - 1;
> +
> +		/* Invalidate the page cache, including dirty pages. */
> +		ret = truncate_bdev_range(bdev, mode, start, end);
> +		if (ret)
> +			return ret;

Can we factor this out into a truncate_zone_range() helper?
On Mar 10, 2021 / 08:45, Christoph Hellwig wrote:
> > 	switch (cmd) {
> > 	case BLKRESETZONE:
> > 		op = REQ_OP_ZONE_RESET;
> > +
> > +		capacity = get_capacity(bdev->bd_disk);
> > +		if (zrange.sector + zrange.nr_sectors <= zrange.sector ||
> > +		    zrange.sector + zrange.nr_sectors > capacity)
> > +			/* Out of range */
> > +			return -EINVAL;
> > +
> > +		start = zrange.sector << SECTOR_SHIFT;
> > +		end = ((zrange.sector + zrange.nr_sectors) << SECTOR_SHIFT) - 1;
> > +
> > +		/* Invalidate the page cache, including dirty pages. */
> > +		ret = truncate_bdev_range(bdev, mode, start, end);
> > +		if (ret)
> > +			return ret;
> 
> Can we factor this out into a truncate_zone_range() helper?

Yes, we can. The helper will be as follows. I will rework the patch and
send v3.

static int blkdev_truncate_zone_range(struct block_device *bdev, fmode_t mode,
				      const struct blk_zone_range *zrange)
{
	loff_t start, end;

	if (zrange->sector + zrange->nr_sectors <= zrange->sector ||
	    zrange->sector + zrange->nr_sectors > get_capacity(bdev->bd_disk))
		/* Out of range */
		return -EINVAL;

	start = zrange->sector << SECTOR_SHIFT;
	end = ((zrange->sector + zrange->nr_sectors) << SECTOR_SHIFT) - 1;

	return truncate_bdev_range(bdev, mode, start, end);
}
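With such a helper, the BLKRESETZONE case in blkdev_zone_mgmt_ioctl() would
presumably reduce to something like the sketch below. The v3 patch itself is
not part of this thread, so this is an assumption about its shape, not the
posted code:

	switch (cmd) {
	case BLKRESETZONE:
		op = REQ_OP_ZONE_RESET;

		/* Invalidate the page cache, including dirty pages. */
		ret = blkdev_truncate_zone_range(bdev, mode, &zrange);
		if (ret)
			return ret;
		break;

The capacity check, the sector-to-byte conversion, and the call to
truncate_bdev_range() all move into the helper, leaving the ioctl switch
statement focused on selecting the zone management operation.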
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 833978c02e60..c2357e1eda18 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -329,6 +329,9 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 	struct request_queue *q;
 	struct blk_zone_range zrange;
 	enum req_opf op;
+	sector_t capacity;
+	loff_t start, end;
+	int ret;
 
 	if (!argp)
 		return -EINVAL;
@@ -352,6 +355,20 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 	switch (cmd) {
 	case BLKRESETZONE:
 		op = REQ_OP_ZONE_RESET;
+
+		capacity = get_capacity(bdev->bd_disk);
+		if (zrange.sector + zrange.nr_sectors <= zrange.sector ||
+		    zrange.sector + zrange.nr_sectors > capacity)
+			/* Out of range */
+			return -EINVAL;
+
+		start = zrange.sector << SECTOR_SHIFT;
+		end = ((zrange.sector + zrange.nr_sectors) << SECTOR_SHIFT) - 1;
+
+		/* Invalidate the page cache, including dirty pages. */
+		ret = truncate_bdev_range(bdev, mode, start, end);
+		if (ret)
+			return ret;
 		break;
 	case BLKOPENZONE:
 		op = REQ_OP_ZONE_OPEN;
@@ -366,8 +383,20 @@ int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
 		return -ENOTTY;
 	}
 
-	return blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
-				GFP_KERNEL);
+	ret = blkdev_zone_mgmt(bdev, op, zrange.sector, zrange.nr_sectors,
+			       GFP_KERNEL);
+
+	/*
+	 * Invalidate the page cache again for zone reset: writes can only be
+	 * direct for zoned devices so concurrent writes would not add any page
+	 * to the page cache after/during reset. The page cache may be filled
+	 * again due to concurrent reads though and dropping the pages for
+	 * these is fine.
+	 */
+	if (!ret && cmd == BLKRESETZONE)
+		ret = truncate_bdev_range(bdev, mode, start, end);
+
+	return ret;
 }
 
 static inline unsigned long *blk_alloc_zone_bitmap(int node,
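For context, the ioctl path patched above is driven from user space via
BLKRESETZONE with a struct blk_zone_range from <linux/blkzoned.h>. Below is a
minimal user-space sketch of a caller; the device path and the zone size are
illustrative assumptions and must match the geometry of the actual zoned
device (nr_sectors has to cover whole zones):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/blkzoned.h>

int main(void)
{
	/* Assumed device path; replace with your zoned block device. */
	int fd = open("/dev/nullb0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Reset the first zone. The zone size of 524288 sectors (256 MiB
	 * with 512-byte sectors) is an assumption for illustration.
	 */
	struct blk_zone_range zrange = {
		.sector		= 0,
		.nr_sectors	= 524288,
	};

	if (ioctl(fd, BLKRESETZONE, &zrange) < 0)
		perror("ioctl(BLKRESETZONE)");

	close(fd);
	return 0;
}

With the patch applied, a buffered read of the reset range performed after
this ioctl returns zeroed data from the device rather than stale pages left
in the page cache from before the reset.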