@@ -264,6 +264,8 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
static void blk_free_queue(struct request_queue *q)
{
blk_free_queue_stats(q->stats);
+ blk_disable_sub_page_limits(&q->limits);
+
if (queue_is_mq(q))
blk_mq_release(q);
@@ -19,6 +19,11 @@
#include "blk-rq-qos.h"
#include "blk-wbt.h"
+/* Protects blk_nr_sub_page_limit_queues and blk_sub_page_limits changes. */
+static DEFINE_MUTEX(blk_sub_page_limit_lock);
+static uint32_t blk_nr_sub_page_limit_queues;
+DEFINE_STATIC_KEY_FALSE(blk_sub_page_limits);
+
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
q->rq_timeout = timeout;
@@ -59,6 +64,7 @@ void blk_set_default_limits(struct queue_limits *lim)
lim->zoned = BLK_ZONED_NONE;
lim->zone_write_granularity = 0;
lim->dma_alignment = 511;
+ lim->sub_page_limits = false;
}
/**
@@ -101,6 +107,50 @@ void blk_queue_bounce_limit(struct request_queue *q, enum blk_bounce bounce)
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
+/**
+ * blk_enable_sub_page_limits - enable support for limits below the page size
+ * @lim: request queue limits for which to enable support of these features.
+ *
+ * Enable support for max_segment_size values smaller than PAGE_SIZE and for
+ * max_hw_sectors values below PAGE_SIZE >> SECTOR_SHIFT. Support for these
+ * features is not enabled by default because of their runtime overhead.
+ */
+static void blk_enable_sub_page_limits(struct queue_limits *lim)
+{
+ if (lim->sub_page_limits)
+ return;
+
+ lim->sub_page_limits = true;
+
+ mutex_lock(&blk_sub_page_limit_lock);
+ if (++blk_nr_sub_page_limit_queues == 1)
+ static_branch_enable(&blk_sub_page_limits);
+ mutex_unlock(&blk_sub_page_limit_lock);
+}
+
+/**
+ * blk_disable_sub_page_limits - disable support for limits below the page size
+ * @lim: request queue limits for which to disable support of these features.
+ *
+ * Disable support for max_segment_size values smaller than PAGE_SIZE and for
+ * max_hw_sectors values below PAGE_SIZE >> SECTOR_SHIFT.
+ */
+void blk_disable_sub_page_limits(struct queue_limits *lim)
+{
+ if (!lim->sub_page_limits)
+ return;
+
+ lim->sub_page_limits = false;
+
+ mutex_lock(&blk_sub_page_limit_lock);
+	WARN_ON_ONCE(blk_nr_sub_page_limit_queues == 0);
+ if (--blk_nr_sub_page_limit_queues == 0)
+ static_branch_disable(&blk_sub_page_limits);
+ mutex_unlock(&blk_sub_page_limit_lock);
+}
+
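/*
 * Illustrative only, not part of the patch: the static key together with the
 * per-queue flag behaves like a reference count across queues.  Assuming two
 * hypothetical queues qa and qb that both end up with sub-page limits:
 *
 *   blk_enable_sub_page_limits(&qa->limits);   count 0 -> 1, key enabled
 *   blk_enable_sub_page_limits(&qb->limits);   count 1 -> 2
 *   blk_disable_sub_page_limits(&qa->limits);  count 2 -> 1
 *   blk_disable_sub_page_limits(&qb->limits);  count 1 -> 0, key disabled
 *
 * Enabling or disabling twice for the same queue_limits is a no-op because of
 * the lim->sub_page_limits check above.
 */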
/**
* blk_queue_max_hw_sectors - set max sectors for a request for this queue
* @q: the request queue for the device
@@ -126,6 +176,11 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_secto
unsigned int min_max_hw_sectors = PAGE_SIZE >> SECTOR_SHIFT;
unsigned int max_sectors;
+ if (max_hw_sectors < min_max_hw_sectors) {
+ blk_enable_sub_page_limits(limits);
+ min_max_hw_sectors = 1;
+ }
+
if (max_hw_sectors < min_max_hw_sectors) {
max_hw_sectors = min_max_hw_sectors;
pr_info("%s: set to minimum %u\n", __func__, max_hw_sectors);
@@ -284,6 +339,11 @@ void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
unsigned int min_max_segment_size = PAGE_SIZE;
+ if (max_size < min_max_segment_size) {
+ blk_enable_sub_page_limits(&q->limits);
+ min_max_segment_size = SECTOR_SIZE;
+ }
+
if (max_size < min_max_segment_size) {
max_size = min_max_segment_size;
pr_info("%s: set to minimum %u\n", __func__, max_size);
@@ -13,6 +13,7 @@ struct elevator_type;
#define BLK_MAX_TIMEOUT (5 * HZ)
extern struct dentry *blk_debugfs_root;
+DECLARE_STATIC_KEY_FALSE(blk_sub_page_limits);
struct blk_flush_queue {
unsigned int flush_pending_idx:1;
@@ -32,6 +33,14 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
gfp_t flags);
void blk_free_flush_queue(struct blk_flush_queue *q);
+static inline bool blk_queue_sub_page_limits(const struct queue_limits *lim)
+{
+ return static_branch_unlikely(&blk_sub_page_limits) &&
+ lim->sub_page_limits;
+}
+
+void blk_disable_sub_page_limits(struct queue_limits *lim);
+
void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
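/*
 * Hypothetical example, not part of the patch: hot-path code can guard
 * sub-page handling with blk_queue_sub_page_limits() so that queues which
 * never configure such limits only pay for a nop-patched static branch.
 * The helper name below is made up for illustration.
 */
static inline unsigned int example_max_seg_len(const struct queue_limits *lim)
{
	unsigned int len = PAGE_SIZE;

	/* Only look at the configured limit when a sub-page limit is active. */
	if (blk_queue_sub_page_limits(lim) && lim->max_segment_size < len)
		len = lim->max_segment_size;
	return len;
}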
@@ -324,6 +324,8 @@ struct queue_limits {
* due to possible offsets.
*/
unsigned int dma_alignment;
+
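+	/* Set when a limit below the page size has been configured. */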
+ bool sub_page_limits;
};
typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,