diff mbox series

[v3,2/3] nvme: add 'latency' iopolicy

Message ID 20240509204324.832846-3-jmeneghi@redhat.com (mailing list archive)
State New
Headers show
Series None | expand

Commit Message

John Meneghini May 9, 2024, 8:43 p.m. UTC
From: Hannes Reinecke <hare@kernel.org>

Add a latency-based I/O policy for multipathing. It uses the blk-nodelat
latency tracker to provide latencies for each node, and schedules
I/O on the path with the least latency for the submitting node.

Signed-off-by: Hannes Reinecke <hare@kernel.org>

Make this compile when CONFIG_BLK_NODE_LATENCY is not set.
Advertise the 'latency' iopolicy in modinfo.

Signed-off-by: John Meneghini <jmeneghi@redhat.com>
---
 drivers/nvme/host/multipath.c | 63 ++++++++++++++++++++++++++++++-----
 drivers/nvme/host/nvme.h      |  1 +
 2 files changed, 55 insertions(+), 9 deletions(-)

Comments

Damien Le Moal May 10, 2024, 7:17 a.m. UTC | #1
On 5/10/24 05:43, John Meneghini wrote:
> From: Hannes Reinecke <hare@kernel.org>
> 
> Add a latency-based I/O policy for multipathing. It uses the blk-nodelat
> latency tracker to provide latencies for each node, and schedules
> I/O on the path with the least latency for the submitting node.
> 
> Signed-off-by: Hannes Reinecke <hare@kernel.org>
> 
> Make this compile when CONFIG_BLK_NODE_LATENCY is not set.
> Advertise the 'latency' iopolicy in modinfo.
> 
> Signed-off-by: John Meneghini <jmeneghi@redhat.com>
> ---
>  drivers/nvme/host/multipath.c | 63 ++++++++++++++++++++++++++++++-----
>  drivers/nvme/host/nvme.h      |  1 +
>  2 files changed, 55 insertions(+), 9 deletions(-)
> 
> diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
> index d916a5ddf5d4..e9330bb1990b 100644
> --- a/drivers/nvme/host/multipath.c
> +++ b/drivers/nvme/host/multipath.c
> @@ -18,6 +18,7 @@ static const char *nvme_iopolicy_names[] = {
>  	[NVME_IOPOLICY_NUMA]	= "numa",
>  	[NVME_IOPOLICY_RR]	= "round-robin",
>  	[NVME_IOPOLICY_QD]      = "queue-depth",
> +	[NVME_IOPOLICY_LAT]	= "latency",
>  };
>  
>  static int iopolicy = NVME_IOPOLICY_NUMA;
> @@ -32,6 +33,10 @@ static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
>  		iopolicy = NVME_IOPOLICY_RR;
>  	else if (!strncmp(val, "queue-depth", 11))
>  		iopolicy = NVME_IOPOLICY_QD;
> +#ifdef CONFIG_BLK_NODE_LATENCY
> +	else if (!strncmp(val, "latency", 7))
> +		iopolicy = NVME_IOPOLICY_LAT;
> +#endif
>  	else
>  		return -EINVAL;
>  
> @@ -43,10 +48,36 @@ static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
>  	return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
>  }
>  
> +static int nvme_activate_iopolicy(struct nvme_subsystem *subsys, int iopolicy)
> +{
> +	struct nvme_ns_head *h;
> +	struct nvme_ns *ns;
> +	bool enable = iopolicy == NVME_IOPOLICY_LAT;
> +	int ret = 0;
> +
> +	mutex_lock(&subsys->lock);
> +	list_for_each_entry(h, &subsys->nsheads, entry) {
> +		list_for_each_entry_rcu(ns, &h->list, siblings) {
> +			if (enable) {
> +				ret = blk_nlat_enable(ns->disk);
> +				if (ret)
> +					break;
> +			} else
> +				blk_nlat_disable(ns->disk);

Missing curly brackets for the else.

> +		}
> +	}
> +	mutex_unlock(&subsys->lock);
> +	return ret;
> +}
> +
>  module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
>  	&iopolicy, 0644);
>  MODULE_PARM_DESC(iopolicy,
> +#if defined(CONFIG_BLK_NODE_LATENCY)

What is so special about the latency policy that it needs to be conditionally
defined ? I missed that point. Why not drop CONFIG_BLK_NODE_LATENCY ?

> +	"Default multipath I/O policy; 'numa' (default) , 'round-robin', 'queue-depth' or 'latency'");
> +#else
>  	"Default multipath I/O policy; 'numa' (default) , 'round-robin' or 'queue-depth'");
> +#endif
>  
>  void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
>  {
> @@ -250,14 +281,16 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
>  {
>  	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
>  	struct nvme_ns *found = NULL, *fallback = NULL, *ns;
> +	int iopolicy = READ_ONCE(head->subsys->iopolicy);
>  
>  	list_for_each_entry_rcu(ns, &head->list, siblings) {
>  		if (nvme_path_is_disabled(ns))
>  			continue;
>  
> -		if (ns->ctrl->numa_node != NUMA_NO_NODE &&
> -		    READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
> +		if (iopolicy == NVME_IOPOLICY_NUMA)
>  			distance = node_distance(node, ns->ctrl->numa_node);
> +		else if (iopolicy == NVME_IOPOLICY_LAT)
> +			distance = blk_nlat_latency(ns->disk, node);
>  		else
>  			distance = LOCAL_DISTANCE;
>  
> @@ -381,8 +414,8 @@ static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
>  
>  inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
>  {
> -	int iopolicy = READ_ONCE(head->subsys->iopolicy);
>  	int node;
> +	int iopolicy = READ_ONCE(head->subsys->iopolicy);

No need to move this line.

>  	struct nvme_ns *ns;
>  
>  	/*
> @@ -401,8 +434,8 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
>  
>  	if (iopolicy == NVME_IOPOLICY_RR)
>  		return nvme_round_robin_path(head, node, ns);
> -
> -	if (unlikely(!nvme_path_is_optimized(ns)))
> +	if (iopolicy == NVME_IOPOLICY_LAT ||
> +	    unlikely(!nvme_path_is_optimized(ns)))
>  		return __nvme_find_path(head, node);
>  	return ns;
>  }
> @@ -872,15 +905,18 @@ static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
>  {
>  	struct nvme_subsystem *subsys =
>  		container_of(dev, struct nvme_subsystem, dev);
> -	int i;
> +	int i, ret;
>  
>  	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
>  		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
> -			nvme_subsys_iopolicy_update(subsys, i);
> -			return count;
> +			ret = nvme_activate_iopolicy(subsys, i);
> +			if (!ret) {
> +				nvme_subsys_iopolicy_update(subsys, i);
> +				return count;
> +			}
> +			return ret;

It would be nicer to have this as:

			if (ret)
				break
			nvme_subsys_iopolicy_update(subsys, i);
			return count;

>  		}
>  	}
> -

whitespace-only change.

>  	return -EINVAL;

And "return ret;" here with ret initialized to -EINVAL when declared.

>  }
>  SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
> @@ -916,6 +952,15 @@ static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
>  
>  void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
>  {
> +	if (!blk_nlat_init(ns->disk) &&
> +	    READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_LAT) {
> +		int ret = blk_nlat_enable(ns->disk);
> +
> +		if (unlikely(ret))
> +			pr_warn("%s: Failed to enable latency tracking, error %d\n",
> +				ns->disk->disk_name, ret);
> +	}
> +
>  	if (nvme_ctrl_use_ana(ns->ctrl)) {
>  		struct nvme_ana_group_desc desc = {
>  			.grpid = anagrpid,
> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
> index a557b4577c01..66bf003a6c48 100644
> --- a/drivers/nvme/host/nvme.h
> +++ b/drivers/nvme/host/nvme.h
> @@ -411,6 +411,7 @@ enum nvme_iopolicy {
>  	NVME_IOPOLICY_NUMA,
>  	NVME_IOPOLICY_RR,
>  	NVME_IOPOLICY_QD,
> +	NVME_IOPOLICY_LAT,
>  };
>  
>  struct nvme_subsystem {
Hannes Reinecke May 10, 2024, 10:03 a.m. UTC | #2
On 5/10/24 09:17, Damien Le Moal wrote:
> On 5/10/24 05:43, John Meneghini wrote:
>> From: Hannes Reinecke <hare@kernel.org>
>>
>> Add a latency-based I/O policy for multipathing. It uses the blk-nodelat
>> latency tracker to provide latencies for each node, and schedules
>> I/O on the path with the least latency for the submitting node.
>>
>> Signed-off-by: Hannes Reinecke <hare@kernel.org>
>>
>> Make this compile when CONFIG_BLK_NODE_LATENCY is not set.
>> Advertise the 'latency' iopolicy in modinfo.
>>
>> Signed-off-by: John Meneghini <jmeneghi@redhat.com>
>> ---
>>   drivers/nvme/host/multipath.c | 63 ++++++++++++++++++++++++++++++-----
>>   drivers/nvme/host/nvme.h      |  1 +
>>   2 files changed, 55 insertions(+), 9 deletions(-)
>>
>> diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
>> index d916a5ddf5d4..e9330bb1990b 100644
>> --- a/drivers/nvme/host/multipath.c
>> +++ b/drivers/nvme/host/multipath.c
>> @@ -18,6 +18,7 @@ static const char *nvme_iopolicy_names[] = {
>>   	[NVME_IOPOLICY_NUMA]	= "numa",
>>   	[NVME_IOPOLICY_RR]	= "round-robin",
>>   	[NVME_IOPOLICY_QD]      = "queue-depth",
>> +	[NVME_IOPOLICY_LAT]	= "latency",
>>   };
>>   
>>   static int iopolicy = NVME_IOPOLICY_NUMA;
>> @@ -32,6 +33,10 @@ static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
>>   		iopolicy = NVME_IOPOLICY_RR;
>>   	else if (!strncmp(val, "queue-depth", 11))
>>   		iopolicy = NVME_IOPOLICY_QD;
>> +#ifdef CONFIG_BLK_NODE_LATENCY
>> +	else if (!strncmp(val, "latency", 7))
>> +		iopolicy = NVME_IOPOLICY_LAT;
>> +#endif
>>   	else
>>   		return -EINVAL;
>>   
>> @@ -43,10 +48,36 @@ static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
>>   	return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
>>   }
>>   
>> +static int nvme_activate_iopolicy(struct nvme_subsystem *subsys, int iopolicy)
>> +{
>> +	struct nvme_ns_head *h;
>> +	struct nvme_ns *ns;
>> +	bool enable = iopolicy == NVME_IOPOLICY_LAT;
>> +	int ret = 0;
>> +
>> +	mutex_lock(&subsys->lock);
>> +	list_for_each_entry(h, &subsys->nsheads, entry) {
>> +		list_for_each_entry_rcu(ns, &h->list, siblings) {
>> +			if (enable) {
>> +				ret = blk_nlat_enable(ns->disk);
>> +				if (ret)
>> +					break;
>> +			} else
>> +				blk_nlat_disable(ns->disk);
> 
> Missing curly brackets for the else.
> 
Ok.

>> +		}
>> +	}
>> +	mutex_unlock(&subsys->lock);
>> +	return ret;
>> +}
>> +
>>   module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
>>   	&iopolicy, 0644);
>>   MODULE_PARM_DESC(iopolicy,
>> +#if defined(CONFIG_BLK_NODE_LATENCY)
> 
> What is so special about the latency policy that it needs to be conditionally
> defined ? I missed that point. Why not drop CONFIG_BLK_NODE_LATENCY ?
> 
The 'latency' policy is using the blk-rqos infrastructure, which in 
itself might not be compiled in.
So we don't want the user to give a false impression here.

>> +	"Default multipath I/O policy; 'numa' (default) , 'round-robin', 'queue-depth' or 'latency'");
>> +#else
>>   	"Default multipath I/O policy; 'numa' (default) , 'round-robin' or 'queue-depth'");
>> +#endif
>>   
>>   void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
>>   {
>> @@ -250,14 +281,16 @@ static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
>>   {
>>   	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
>>   	struct nvme_ns *found = NULL, *fallback = NULL, *ns;
>> +	int iopolicy = READ_ONCE(head->subsys->iopolicy);
>>   
>>   	list_for_each_entry_rcu(ns, &head->list, siblings) {
>>   		if (nvme_path_is_disabled(ns))
>>   			continue;
>>   
>> -		if (ns->ctrl->numa_node != NUMA_NO_NODE &&
>> -		    READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
>> +		if (iopolicy == NVME_IOPOLICY_NUMA)
>>   			distance = node_distance(node, ns->ctrl->numa_node);
>> +		else if (iopolicy == NVME_IOPOLICY_LAT)
>> +			distance = blk_nlat_latency(ns->disk, node);
>>   		else
>>   			distance = LOCAL_DISTANCE;
>>   
>> @@ -381,8 +414,8 @@ static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
>>   
>>   inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
>>   {
>> -	int iopolicy = READ_ONCE(head->subsys->iopolicy);
>>   	int node;
>> +	int iopolicy = READ_ONCE(head->subsys->iopolicy);
> 
> No need to move this line.
> 
Sure.

>>   	struct nvme_ns *ns;
>>   
>>   	/*
>> @@ -401,8 +434,8 @@ inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
>>   
>>   	if (iopolicy == NVME_IOPOLICY_RR)
>>   		return nvme_round_robin_path(head, node, ns);
>> -
>> -	if (unlikely(!nvme_path_is_optimized(ns)))
>> +	if (iopolicy == NVME_IOPOLICY_LAT ||
>> +	    unlikely(!nvme_path_is_optimized(ns)))
>>   		return __nvme_find_path(head, node);
>>   	return ns;
>>   }
>> @@ -872,15 +905,18 @@ static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
>>   {
>>   	struct nvme_subsystem *subsys =
>>   		container_of(dev, struct nvme_subsystem, dev);
>> -	int i;
>> +	int i, ret;
>>   
>>   	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
>>   		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
>> -			nvme_subsys_iopolicy_update(subsys, i);
>> -			return count;
>> +			ret = nvme_activate_iopolicy(subsys, i);
>> +			if (!ret) {
>> +				nvme_subsys_iopolicy_update(subsys, i);
>> +				return count;
>> +			}
>> +			return ret;
> 
> It would be nicer to have this as:
> 
> 			if (ret)
> 				break;
> 			nvme_subsys_iopolicy_update(subsys, i);
> 			return count;
> 

Ok.

>>   		}
>>   	}
>> -
> 
> whitespace-only change.
> 
>>   	return -EINVAL;
> 
> And "return ret;" here with ret initialized to -EINVAL when declared.
> 
Ok.

>>   }
>>   SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
>> @@ -916,6 +952,15 @@ static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
>>   
>>   void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
>>   {
>> +	if (!blk_nlat_init(ns->disk) &&
>> +	    READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_LAT) {
>> +		int ret = blk_nlat_enable(ns->disk);
>> +
>> +		if (unlikely(ret))
>> +			pr_warn("%s: Failed to enable latency tracking, error %d\n",
>> +				ns->disk->disk_name, ret);
>> +	}
>> +
>>   	if (nvme_ctrl_use_ana(ns->ctrl)) {
>>   		struct nvme_ana_group_desc desc = {
>>   			.grpid = anagrpid,
>> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
>> index a557b4577c01..66bf003a6c48 100644
>> --- a/drivers/nvme/host/nvme.h
>> +++ b/drivers/nvme/host/nvme.h
>> @@ -411,6 +411,7 @@ enum nvme_iopolicy {
>>   	NVME_IOPOLICY_NUMA,
>>   	NVME_IOPOLICY_RR,
>>   	NVME_IOPOLICY_QD,
>> +	NVME_IOPOLICY_LAT,
>>   };
>>   
>>   struct nvme_subsystem {
> 

Cheers,

Hannes
diff mbox series

Patch

diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index d916a5ddf5d4..e9330bb1990b 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -18,6 +18,7 @@  static const char *nvme_iopolicy_names[] = {
 	[NVME_IOPOLICY_NUMA]	= "numa",
 	[NVME_IOPOLICY_RR]	= "round-robin",
 	[NVME_IOPOLICY_QD]      = "queue-depth",
+	[NVME_IOPOLICY_LAT]	= "latency",
 };
 
 static int iopolicy = NVME_IOPOLICY_NUMA;
@@ -32,6 +33,10 @@  static int nvme_set_iopolicy(const char *val, const struct kernel_param *kp)
 		iopolicy = NVME_IOPOLICY_RR;
 	else if (!strncmp(val, "queue-depth", 11))
 		iopolicy = NVME_IOPOLICY_QD;
+#ifdef CONFIG_BLK_NODE_LATENCY
+	else if (!strncmp(val, "latency", 7))
+		iopolicy = NVME_IOPOLICY_LAT;
+#endif
 	else
 		return -EINVAL;
 
@@ -43,10 +48,36 @@  static int nvme_get_iopolicy(char *buf, const struct kernel_param *kp)
 	return sprintf(buf, "%s\n", nvme_iopolicy_names[iopolicy]);
 }
 
+static int nvme_activate_iopolicy(struct nvme_subsystem *subsys, int iopolicy)
+{
+	struct nvme_ns_head *h;
+	struct nvme_ns *ns;
+	bool enable = iopolicy == NVME_IOPOLICY_LAT;
+	int ret = 0;
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(h, &subsys->nsheads, entry) {
+		list_for_each_entry_rcu(ns, &h->list, siblings) {
+			if (enable) {
+				ret = blk_nlat_enable(ns->disk);
+				if (ret)
+					break;
+			} else
+				blk_nlat_disable(ns->disk);
+		}
+	}
+	mutex_unlock(&subsys->lock);
+	return ret;
+}
+
 module_param_call(iopolicy, nvme_set_iopolicy, nvme_get_iopolicy,
 	&iopolicy, 0644);
 MODULE_PARM_DESC(iopolicy,
+#if defined(CONFIG_BLK_NODE_LATENCY)
+	"Default multipath I/O policy; 'numa' (default) , 'round-robin', 'queue-depth' or 'latency'");
+#else
 	"Default multipath I/O policy; 'numa' (default) , 'round-robin' or 'queue-depth'");
+#endif
 
 void nvme_mpath_default_iopolicy(struct nvme_subsystem *subsys)
 {
@@ -250,14 +281,16 @@  static struct nvme_ns *__nvme_find_path(struct nvme_ns_head *head, int node)
 {
 	int found_distance = INT_MAX, fallback_distance = INT_MAX, distance;
 	struct nvme_ns *found = NULL, *fallback = NULL, *ns;
+	int iopolicy = READ_ONCE(head->subsys->iopolicy);
 
 	list_for_each_entry_rcu(ns, &head->list, siblings) {
 		if (nvme_path_is_disabled(ns))
 			continue;
 
-		if (ns->ctrl->numa_node != NUMA_NO_NODE &&
-		    READ_ONCE(head->subsys->iopolicy) == NVME_IOPOLICY_NUMA)
+		if (iopolicy == NVME_IOPOLICY_NUMA)
 			distance = node_distance(node, ns->ctrl->numa_node);
+		else if (iopolicy == NVME_IOPOLICY_LAT)
+			distance = blk_nlat_latency(ns->disk, node);
 		else
 			distance = LOCAL_DISTANCE;
 
@@ -381,8 +414,8 @@  static inline bool nvme_path_is_optimized(struct nvme_ns *ns)
 
 inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
 {
-	int iopolicy = READ_ONCE(head->subsys->iopolicy);
 	int node;
+	int iopolicy = READ_ONCE(head->subsys->iopolicy);
 	struct nvme_ns *ns;
 
 	/*
@@ -401,8 +434,8 @@  inline struct nvme_ns *nvme_find_path(struct nvme_ns_head *head)
 
 	if (iopolicy == NVME_IOPOLICY_RR)
 		return nvme_round_robin_path(head, node, ns);
-
-	if (unlikely(!nvme_path_is_optimized(ns)))
+	if (iopolicy == NVME_IOPOLICY_LAT ||
+	    unlikely(!nvme_path_is_optimized(ns)))
 		return __nvme_find_path(head, node);
 	return ns;
 }
@@ -872,15 +905,18 @@  static ssize_t nvme_subsys_iopolicy_store(struct device *dev,
 {
 	struct nvme_subsystem *subsys =
 		container_of(dev, struct nvme_subsystem, dev);
-	int i;
+	int i, ret;
 
 	for (i = 0; i < ARRAY_SIZE(nvme_iopolicy_names); i++) {
 		if (sysfs_streq(buf, nvme_iopolicy_names[i])) {
-			nvme_subsys_iopolicy_update(subsys, i);
-			return count;
+			ret = nvme_activate_iopolicy(subsys, i);
+			if (!ret) {
+				nvme_subsys_iopolicy_update(subsys, i);
+				return count;
+			}
+			return ret;
 		}
 	}
-
 	return -EINVAL;
 }
 SUBSYS_ATTR_RW(iopolicy, S_IRUGO | S_IWUSR,
@@ -916,6 +952,15 @@  static int nvme_lookup_ana_group_desc(struct nvme_ctrl *ctrl,
 
 void nvme_mpath_add_disk(struct nvme_ns *ns, __le32 anagrpid)
 {
+	if (!blk_nlat_init(ns->disk) &&
+	    READ_ONCE(ns->head->subsys->iopolicy) == NVME_IOPOLICY_LAT) {
+		int ret = blk_nlat_enable(ns->disk);
+
+		if (unlikely(ret))
+			pr_warn("%s: Failed to enable latency tracking, error %d\n",
+				ns->disk->disk_name, ret);
+	}
+
 	if (nvme_ctrl_use_ana(ns->ctrl)) {
 		struct nvme_ana_group_desc desc = {
 			.grpid = anagrpid,
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index a557b4577c01..66bf003a6c48 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -411,6 +411,7 @@  enum nvme_iopolicy {
 	NVME_IOPOLICY_NUMA,
 	NVME_IOPOLICY_RR,
 	NVME_IOPOLICY_QD,
+	NVME_IOPOLICY_LAT,
 };
 
 struct nvme_subsystem {