@@ -79,6 +79,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
WARN_ON_ONCE((req_sects << 9) > UINT_MAX);
+ pr_info("%s %d sector %llu nr_sects %llu\n",
+ __func__, __LINE__, sector, nr_sects);
bio = blk_next_bio(bio, bdev, sector, 0, op, gfp_mask);
bio->bi_iter.bi_size = req_sects << 9;
sector += req_sects;
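
The print above fires once per bio, so a discard larger than the queue's
discard limits shows up as a series of lines with an advancing sector and
a shrinking nr_sects. A minimal user-space trigger for this path, assuming
a null_blk device at /dev/nullb0 (device path and range are examples only),
is the BLKDISCARD ioctl:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>           /* BLKDISCARD */

int main(void)
{
        /* byte offset and byte length of the range to discard */
        uint64_t range[2] = { 0, 1 << 20 };
        int fd = open("/dev/nullb0", O_WRONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        if (ioctl(fd, BLKDISCARD, &range))
                perror("BLKDISCARD");
        close(fd);
        return 0;
}
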
@@ -166,6 +168,8 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
max_write_same_sectors = bio_allowed_max_sectors(q);
while (nr_sects) {
+ pr_info("%s %d sector %llu nr_sects %llu\n",
+ __func__, __LINE__, sector, nr_sects);
bio = blk_next_bio(bio, bdev, sector, 1, REQ_OP_WRITE_SAME,
gfp_mask);
bio->bi_vcnt = 1;
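
The WRITE SAME path gets the same print, mostly for completeness: there is
no block-device ioctl that issues REQ_OP_WRITE_SAME from user space, so
this print only fires for in-kernel callers of blkdev_issue_write_same(),
for example device-mapper or SCSI target code stacked on top of the device.
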
@@ -238,6 +242,8 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
return -EOPNOTSUPP;
while (nr_sects) {
+ pr_info("%s %d sector %llu nr_sects %llu\n",
+ __func__, __LINE__, sector, nr_sects);
bio = blk_next_bio(bio, bdev, sector, 0, op | opf, gfp_mask);
if (nr_sects > max_write_zeroes_sectors) {
@@ -281,6 +287,8 @@ static int __blkdev_issue_zero_pages(struct block_device *bdev,
return -EPERM;
while (nr_sects != 0) {
+ pr_info("%s %d sector %llu nr_sects %llu\n",
+ __func__, __LINE__, sector, nr_sects);
bio = blk_next_bio(bio, bdev, sector, nr_pages, REQ_OP_WRITE,
gfp_mask);
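
Both zeroing paths are reachable through the BLKZEROOUT ioctl:
blkdev_issue_zeroout() first tries __blkdev_issue_write_zeroes() and falls
back to __blkdev_issue_zero_pages() when the queue advertises no
write-zeroes support, so which print fires depends on the write_zeroes knob
added to null_blk below. A sketch, again assuming /dev/nullb0:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>           /* BLKZEROOUT */

int main(void)
{
        /* byte offset and byte length of the range to zero */
        uint64_t range[2] = { 0, 1 << 20 };
        int fd = open("/dev/nullb0", O_WRONLY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, BLKZEROOUT, &range))
                perror("BLKZEROOUT");
        close(fd);
        return 0;
}
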
@@ -215,6 +215,8 @@ static int blkdev_zone_reset_all_emulated(struct block_device *bdev,
continue;
}
+ pr_info("%s %d sector %llu nr_sects 0\n",
+ __func__, __LINE__, sector);
bio = blk_next_bio(bio, bdev, sector, 0,
REQ_OP_ZONE_RESET | REQ_SYNC, gfp_mask);
sector += zone_sectors;
@@ -301,6 +303,8 @@ int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
}
while (sector < end_sector) {
+ pr_info("%s %d sector %llu nr_sects 0\n",
+ __func__, __LINE__, sector);
bio = blk_next_bio(bio, bdev, sector, 0, op | REQ_SYNC,
gfp_mask);
sector += zone_sectors;
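
Both zone prints are driven by the BLKRESETZONE ioctl. A reset of a single
zone (or any sub-range) goes through the loop in blkdev_zone_mgmt();
blkdev_zone_reset_all_emulated() is only entered for a whole-device reset
on a queue without QUEUE_FLAG_ZONE_RESETALL, which is exactly what the
null_blk reset_all knob below controls. A single-zone sketch, assuming
/dev/nullb0 with the default 256 MiB zone size (524288 sectors, an example
value):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/blkzoned.h>     /* BLKRESETZONE, struct blk_zone_range */

int main(void)
{
        struct blk_zone_range zrange = {
                .sector = 0,            /* start of the first zone */
                .nr_sectors = 524288,   /* one zone; adjust to the zone size */
        };
        int fd = open("/dev/nullb0", O_WRONLY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, BLKRESETZONE, &zrange))
                perror("BLKRESETZONE");
        close(fd);
        return 0;
}
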
@@ -80,6 +80,14 @@ enum {
NULL_Q_MQ = 2,
};
+static bool g_write_zeroes = false;
+module_param_named(write_zeroes, g_write_zeroes, bool, 0444);
+MODULE_PARM_DESC(write_zeroes, "Advertise write zeroes support. Default: False");
+
+static bool g_reset_all = false;
+module_param_named(reset_all, g_reset_all, bool, 0444);
+MODULE_PARM_DESC(reset_all, "Advertise native zone reset-all support. Default: False");
+
static bool g_virt_boundary = false;
module_param_named(virt_boundary, g_virt_boundary, bool, 0444);
MODULE_PARM_DESC(virt_boundary, "Require a virtual boundary for the device. Default: False");
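
Both knobs use permission 0444, so they are read-only through sysfs and
must be set at load time, for example modprobe null_blk write_zeroes=1, or
zoned=1 reset_all=1 for the zone paths (the combinations are illustrative).
With the defaults both stay off, which leaves write zeroes unadvertised and
forces the emulated reset-all path.
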
@@ -656,6 +662,7 @@ static struct nullb_device *null_alloc_dev(void)
dev->zone_nr_conv = g_zone_nr_conv;
dev->zone_max_open = g_zone_max_open;
dev->zone_max_active = g_zone_max_active;
+ dev->reset_all = g_reset_all;
dev->virt_boundary = g_virt_boundary;
return dev;
}
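
Note the asymmetry: only reset_all is propagated into the per-device
structure here. null_config_discard() below reads g_write_zeroes directly,
so with this debug patch write zeroes is a global toggle rather than a
per-device (configfs) attribute.
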
@@ -1749,25 +1756,12 @@ static void null_del_dev(struct nullb *nullb)
static void null_config_discard(struct nullb *nullb)
{
- if (nullb->dev->discard == false)
- return;
-
- if (!nullb->dev->memory_backed) {
- nullb->dev->discard = false;
- pr_info("discard option is ignored without memory backing\n");
- return;
- }
-
- if (nullb->dev->zoned) {
- nullb->dev->discard = false;
- pr_info("discard option is ignored in zoned mode\n");
- return;
- }
-
nullb->q->limits.discard_granularity = nullb->dev->blocksize;
nullb->q->limits.discard_alignment = nullb->dev->blocksize;
blk_queue_max_discard_sectors(nullb->q, UINT_MAX >> 9);
blk_queue_flag_set(QUEUE_FLAG_DISCARD, nullb->q);
+ if (g_write_zeroes)
+ blk_queue_max_write_zeroes_sectors(nullb->q, UINT_MAX >> 9);
}
static const struct block_device_operations null_bio_ops = {
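
The removed guards are the point of this hunk: with them in place, discard
(and therefore the blk-lib paths traced above) could only be exercised on a
memory-backed, non-zoned instance. Now QUEUE_FLAG_DISCARD is set
unconditionally, and write_zeroes=1 additionally advertises a write-zeroes
limit. null_blk without memory backing completes these bios without
touching any data, which should be enough for watching how the block layer
splits them, though it clearly no longer models real discard semantics.
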
@@ -102,6 +102,7 @@ struct nullb_device {
bool power; /* power on/off the device */
bool memory_backed; /* if data is stored in memory */
bool discard; /* if support discard */
+ bool reset_all; /* if support reset_all */
bool zoned; /* if device is zoned */
bool virt_boundary; /* virtual boundary on/off for the device */
};
@@ -157,7 +157,8 @@ int null_register_zoned_dev(struct nullb *nullb)
struct request_queue *q = nullb->q;
blk_queue_set_zoned(nullb->disk, BLK_ZONED_HM);
- blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+ if (dev->reset_all)
+ blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
if (queue_is_mq(q)) {
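
With reset_all=0 (the default) QUEUE_FLAG_ZONE_RESETALL stays clear, so a
whole-device reset is emulated zone by zone through
blkdev_zone_reset_all_emulated() and hits the first zone print; with
reset_all=1 the block layer sends a single REQ_OP_ZONE_RESET_ALL bio
instead. Note that the emulated path skips zones that are already empty, so
write to a few zones first to see the per-zone prints. A whole-device reset
sketch, assuming /dev/nullb0:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>           /* BLKGETSIZE64 */
#include <linux/blkzoned.h>     /* BLKRESETZONE, struct blk_zone_range */

int main(void)
{
        unsigned long long bytes;
        struct blk_zone_range zrange = { .sector = 0 };
        int fd = open("/dev/nullb0", O_WRONLY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, BLKGETSIZE64, &bytes))
                return 1;
        zrange.nr_sectors = bytes >> 9; /* whole device, in 512 B sectors */
        if (ioctl(fd, BLKRESETZONE, &zrange))
                perror("BLKRESETZONE");
        close(fd);
        return 0;
}
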