@@ -37,6 +37,7 @@ void blk_set_stacking_limits(struct queue_limits *lim)
 	memset(lim, 0, sizeof(*lim));
 	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
 	lim->discard_granularity = 512;
+	lim->max_user_discard_sectors = UINT_MAX;
 	lim->dma_alignment = 511;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
 
@@ -160,7 +161,9 @@ static int blk_validate_limits(struct queue_limits *lim)
 	if (!lim->max_segments)
 		lim->max_segments = BLK_MAX_SEGMENTS;
 
-	lim->max_discard_sectors = lim->max_hw_discard_sectors;
+	lim->max_discard_sectors =
+		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);
+
 	if (!lim->max_discard_segments)
 		lim->max_discard_segments = 1;
 
@@ -226,6 +229,12 @@ static int blk_validate_limits(struct queue_limits *lim)
  */
 int blk_set_default_limits(struct queue_limits *lim)
 {
+	/*
+	 * Most defaults are set by capping the bounds in blk_validate_limits,
+	 * but max_user_discard_sectors is special and needs an explicit
+	 * initialization to the max value here.
+	 */
+	lim->max_user_discard_sectors = UINT_MAX;
 	return blk_validate_limits(lim);
 }
 
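A note on the two UINT_MAX initializations above: max_user_discard_sectors only records the cap requested through sysfs, and blk_validate_limits() always re-derives the effective max_discard_sectors as min(hardware limit, user limit). Starting the user limit at UINT_MAX therefore leaves the hardware limit in effect until the user writes a smaller value. A minimal user-space sketch of that capping rule (the struct and helper names below are illustrative stand-ins, not kernel code):

    #include <limits.h>
    #include <stdio.h>

    /* Illustrative stand-in for the discard fields of struct queue_limits. */
    struct discard_limits {
            unsigned int max_hw_discard_sectors;    /* set by the driver */
            unsigned int max_user_discard_sectors;  /* set via sysfs, UINT_MAX = no cap */
            unsigned int max_discard_sectors;       /* effective, derived limit */
    };

    /* Mirrors the min() added to blk_validate_limits() above. */
    static void recalc_discard_limit(struct discard_limits *lim)
    {
            lim->max_discard_sectors =
                    lim->max_hw_discard_sectors < lim->max_user_discard_sectors ?
                    lim->max_hw_discard_sectors : lim->max_user_discard_sectors;
    }

    int main(void)
    {
            struct discard_limits lim = {
                    .max_hw_discard_sectors = 65536,
                    .max_user_discard_sectors = UINT_MAX,   /* default: no user cap */
            };

            recalc_discard_limit(&lim);
            printf("effective (default): %u sectors\n", lim.max_discard_sectors); /* 65536 */

            lim.max_user_discard_sectors = 2048;            /* user wrote a smaller cap */
            recalc_discard_limit(&lim);
            printf("effective (capped):  %u sectors\n", lim.max_discard_sectors); /* 2048 */
            return 0;
    }

Re-running the same min() whenever either side changes is what keeps a user-requested cap from being silently exceeded when a driver later raises its hardware limit.
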
@@ -347,8 +356,11 @@ EXPORT_SYMBOL(blk_queue_chunk_sectors);
 void blk_queue_max_discard_sectors(struct request_queue *q,
 		unsigned int max_discard_sectors)
 {
-	q->limits.max_hw_discard_sectors = max_discard_sectors;
-	q->limits.max_discard_sectors = max_discard_sectors;
+	struct queue_limits *lim = &q->limits;
+
+	lim->max_hw_discard_sectors = max_discard_sectors;
+	lim->max_discard_sectors =
+		min(max_discard_sectors, lim->max_user_discard_sectors);
 }
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
 
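At driver call sites nothing changes: a driver keeps advertising only its hardware capability, and the helper now folds in any user cap instead of overwriting it. A hedged sketch of how a hypothetical driver might use the updated helper (the function name and the 65536-sector value are invented for illustration; only blk_queue_max_discard_sectors() comes from the patch):

    #include <linux/blkdev.h>

    /* Hypothetical driver hook; "mydrv" and the sector count are examples only. */
    static void mydrv_configure_discard(struct request_queue *q)
    {
            /*
             * Advertise the device's hardware discard capability in 512-byte
             * sectors. The effective limit becomes min(hw, user), so a value
             * previously written to discard_max_bytes is not silently raised.
             */
            blk_queue_max_discard_sectors(q, 65536);
    }
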
@@ -174,23 +174,22 @@ static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
 static ssize_t queue_discard_max_store(struct request_queue *q,
                                        const char *page, size_t count)
 {
-	unsigned long max_discard;
-	ssize_t ret = queue_var_store(&max_discard, page, count);
+	unsigned long max_discard_bytes;
+	ssize_t ret;
 
+	ret = queue_var_store(&max_discard_bytes, page, count);
 	if (ret < 0)
 		return ret;
 
-	if (max_discard & (q->limits.discard_granularity - 1))
+	if (max_discard_bytes & (q->limits.discard_granularity - 1))
 		return -EINVAL;
 
-	max_discard >>= 9;
-	if (max_discard > UINT_MAX)
+	if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
 		return -EINVAL;
 
-	if (max_discard > q->limits.max_hw_discard_sectors)
-		max_discard = q->limits.max_hw_discard_sectors;
-
-	q->limits.max_discard_sectors = max_discard;
+	q->limits.max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
+	q->limits.max_discard_sectors = min(q->limits.max_hw_discard_sectors,
+	                                    q->limits.max_user_discard_sectors);
 	return ret;
 }
 
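On the sysfs side, discard_max_bytes still takes a byte count, which must be a multiple of discard_granularity and is shifted down to 512-byte sectors before being recorded as the user limit; the effective limit is then re-derived with the same min(). A small user-space model of that store path (struct layout and function name are illustrative; the checks mirror queue_discard_max_store() above):

    #include <errno.h>
    #include <limits.h>
    #include <stdio.h>

    #define SECTOR_SHIFT 9          /* 512-byte sectors, as in the kernel */

    struct discard_limits {
            unsigned int discard_granularity;
            unsigned int max_hw_discard_sectors;
            unsigned int max_user_discard_sectors;
            unsigned int max_discard_sectors;
    };

    /* Models queue_discard_max_store(): bytes in, user and effective limits out. */
    static int store_discard_max_bytes(struct discard_limits *lim,
                                       unsigned long max_discard_bytes)
    {
            /* Reject values not aligned to the discard granularity. */
            if (max_discard_bytes & (lim->discard_granularity - 1))
                    return -EINVAL;
            /* Reject sector counts that would not fit in the 32-bit limit field. */
            if ((max_discard_bytes >> SECTOR_SHIFT) > UINT_MAX)
                    return -EINVAL;

            lim->max_user_discard_sectors = max_discard_bytes >> SECTOR_SHIFT;
            lim->max_discard_sectors =
                    lim->max_hw_discard_sectors < lim->max_user_discard_sectors ?
                    lim->max_hw_discard_sectors : lim->max_user_discard_sectors;
            return 0;
    }

    int main(void)
    {
            struct discard_limits lim = {
                    .discard_granularity = 512,
                    .max_hw_discard_sectors = 1 << 24,
                    .max_user_discard_sectors = UINT_MAX,
            };

            /* Writing 1 MiB yields a 2048-sector user limit, below the hw limit. */
            if (store_discard_max_bytes(&lim, 1UL << 20) == 0)
                    printf("user=%u effective=%u\n",
                           lim.max_user_discard_sectors, lim.max_discard_sectors);
            return 0;
    }
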
@@ -290,6 +290,7 @@ struct queue_limits {
 	unsigned int		io_opt;
 	unsigned int		max_discard_sectors;
 	unsigned int		max_hw_discard_sectors;
+	unsigned int		max_user_discard_sectors;
 	unsigned int		max_secure_erase_sectors;
 	unsigned int		max_write_zeroes_sectors;
 	unsigned int		max_zone_append_sectors;