--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -76,6 +76,40 @@ queue_requests_store(struct gendisk *disk, const char *page, size_t count)
 	return ret;
 }
+static ssize_t queue_async_depth_show(struct gendisk *disk, char *page)
+{
+	if (disk->queue->async_depth)
+		return queue_var_show(disk->queue->async_depth, page);
+
+	return queue_requests_show(disk, page);
+}
+
+static ssize_t
+queue_async_depth_store(struct gendisk *disk, const char *page, size_t count)
+{
+	struct request_queue *q = disk->queue;
+	struct elevator_queue *e = q->elevator;
+	unsigned long nr;
+	int ret, err;
+
+	if (!e || !e->type->ops.async_depth_updated)
+		return -EINVAL;
+
+	ret = queue_var_store(&nr, page, count);
+	if (ret < 0)
+		return ret;
+
+	/* nr is unsigned; treat any out-of-range value as 0 (unlimited) */
+	if (nr >= q->nr_requests)
+		nr = 0;
+
+	err = e->type->ops.async_depth_updated(q, nr);
+	if (err)
+		return err;
+
+	return ret;
+}
+
 static ssize_t queue_ra_show(struct gendisk *disk, char *page)
 {
 	return queue_var_show(disk->bdi->ra_pages << (PAGE_SHIFT - 10), page);
 }
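
For context, here is a minimal sketch of what an implementation of the new
->async_depth_updated() elevator callback could look like. This is an
illustration, not part of the patch: the name example_async_depth_updated()
is hypothetical, the code assumes block-layer-internal context (where
<linux/blk-mq.h> and <linux/sbitmap.h> are available), and it assumes the
callback is responsible for storing the clamped value into q->async_depth,
since the sysfs store above does not write the field itself:

static int example_async_depth_updated(struct request_queue *q,
				       unsigned long depth)
{
	struct blk_mq_hw_ctx *hctx;
	unsigned long i;

	/* The sysfs store has already clamped depth to [0, nr_requests). */
	q->async_depth = depth;

	/*
	 * Propagate the smallest shallow depth that tag allocations may
	 * use, so sbitmap keeps its wake batching consistent; 0 means
	 * unlimited, which maps back to the sbitmap default (UINT_MAX).
	 */
	queue_for_each_hw_ctx(q, hctx, i)
		sbitmap_queue_min_shallow_depth(&hctx->sched_tags->bitmap_tags,
						depth ?: UINT_MAX);
	return 0;
}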
@@ -440,6 +474,7 @@ static struct queue_sysfs_entry _prefix##_entry = { \
 }
 QUEUE_RW_ENTRY(queue_requests, "nr_requests");
+QUEUE_RW_ENTRY(queue_async_depth, "async_depth");
 QUEUE_RW_ENTRY(queue_ra, "read_ahead_kb");
 QUEUE_RW_ENTRY(queue_max_sectors, "max_sectors_kb");
 QUEUE_RO_ENTRY(queue_max_hw_sectors, "max_hw_sectors_kb");
 QUEUE_RO_ENTRY(queue_max_segments, "max_segments");
@@ -621,6 +656,7 @@ static struct attribute *queue_attrs[] = {
 /* Request-based queue attributes that are not relevant for bio-based queues. */
 static struct attribute *blk_mq_queue_attrs[] = {
 	&queue_requests_entry.attr,
+	&queue_async_depth_entry.attr,
 	&elv_iosched_entry.attr,
 	&queue_rq_affinity_entry.attr,
 	&queue_io_timeout_entry.attr,
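
With the entry added to blk_mq_queue_attrs, the limit is exposed as
/sys/block/<disk>/queue/async_depth for blk-mq queues. Per the code above:
reading the file returns async_depth, falling back to nr_requests while the
limit is 0; writing a value in [1, nr_requests) installs that limit, while 0
or anything >= nr_requests resets to unlimited; and writes fail with EINVAL
when no elevator with an ->async_depth_updated() callback is active.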
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -524,6 +524,12 @@ struct request_queue {
 	 * queue settings
 	 */
 	unsigned int nr_requests;	/* Max # of requests */
+	/*
+	 * Maximum number of async requests, used by the elevator.
+	 * Valid range: [0, nr_requests); 0 is the default and means
+	 * unlimited.
+	 */
+	unsigned int async_depth;
 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
 	struct blk_crypto_profile *crypto_profile;
 	struct kobject *crypto_kobject;
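
To show how the new field is meant to be consumed, here is a hedged sketch
of an elevator ->limit_depth() hook that honors q->async_depth when tags are
allocated. Again illustrative only: example_limit_depth() is a hypothetical
name, and struct blk_mq_alloc_data comes from the block-layer-internal
header block/blk-mq.h:

static void example_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data)
{
	struct request_queue *q = data->q;

	/* 0 means unlimited, per the struct request_queue comment above. */
	if (!q->async_depth)
		return;

	/* Throttle only async requests; sync I/O keeps the full tag depth. */
	if (!op_is_sync(opf))
		data->shallow_depth = q->async_depth;
}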