diff mbox series

[2/5] nvme: register ns_id attributes as default sysfs groups

Message ID 20180814073305.87255-3-hare@suse.de (mailing list archive)
State New, archived
Headers show
Series genhd: register default groups with device_add_disk() | expand

Commit Message

Hannes Reinecke Aug. 14, 2018, 7:33 a.m. UTC
We should be registering the ns_id attribute as default sysfs
attribute groups, otherwise we have a race condition between
the uevent and the attributes appearing in sysfs.

Signed-off-by: Hannes Reinecke <hare@suse.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
---
 drivers/nvme/host/core.c      | 21 +++++++++------------
 drivers/nvme/host/lightnvm.c  | 27 ++++-----------------------
 drivers/nvme/host/multipath.c | 15 ++++-----------
 drivers/nvme/host/nvme.h      | 11 +++--------
 4 files changed, 20 insertions(+), 54 deletions(-)

Comments

Javier González Aug. 14, 2018, 9:03 a.m. UTC | #1
> 
> On 14 Aug 2018, at 09.33, Hannes Reinecke <hare@suse.de> wrote:
> 
> We should be registering the ns_id attribute as default sysfs
> attribute groups, otherwise we have a race condition between
> the uevent and the attributes appearing in sysfs.
> 
> Signed-off-by: Hannes Reinecke <hare@suse.com>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> ---
> drivers/nvme/host/core.c      | 21 +++++++++------------
> drivers/nvme/host/lightnvm.c  | 27 ++++-----------------------
> drivers/nvme/host/multipath.c | 15 ++++-----------
> drivers/nvme/host/nvme.h      | 11 +++--------
> 4 files changed, 20 insertions(+), 54 deletions(-)
> 
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index 0e824e8c8fd7..8e26d98e9a8f 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -2734,6 +2734,12 @@ const struct attribute_group nvme_ns_id_attr_group = {
> 	.is_visible	= nvme_ns_id_attrs_are_visible,
> };
> 
> +const struct attribute_group *nvme_ns_id_attr_groups[] = {
> +	&nvme_ns_id_attr_group,
> +	NULL, /* Will be filled in by lightnvm if present */
> +	NULL,
> +};
> +
> #define nvme_show_str_function(field)						\
> static ssize_t  field##_show(struct device *dev,				\
> 			    struct device_attribute *attr, char *buf)		\
> @@ -3099,14 +3105,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
> 
> 	nvme_get_ctrl(ctrl);
> 
> -	device_add_disk(ctrl->device, ns->disk, NULL);
> -	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvme_ns_id_attr_group))
> -		pr_warn("%s: failed to create sysfs group for identification\n",
> -			ns->disk->disk_name);
> -	if (ns->ndev && nvme_nvm_register_sysfs(ns))
> -		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
> -			ns->disk->disk_name);
> +	if (ns->ndev)
> +		nvme_nvm_register_sysfs(ns);
> +	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
> 
> 	nvme_mpath_add_disk(ns, id);
> 	nvme_fault_inject_init(ns);
> @@ -3132,10 +3133,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
> 
> 	nvme_fault_inject_fini(ns);
> 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
> -		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvme_ns_id_attr_group);
> -		if (ns->ndev)
> -			nvme_nvm_unregister_sysfs(ns);
> 		del_gendisk(ns->disk);
> 		blk_cleanup_queue(ns->queue);
> 		if (blk_get_integrity(ns->disk))
> diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
> index 6fe5923c95d4..7bf2f9da6293 100644
> --- a/drivers/nvme/host/lightnvm.c
> +++ b/drivers/nvme/host/lightnvm.c
> @@ -1270,39 +1270,20 @@ static const struct attribute_group nvm_dev_attr_group_20 = {
> 	.attrs		= nvm_dev_attrs_20,
> };
> 
> -int nvme_nvm_register_sysfs(struct nvme_ns *ns)
> +void nvme_nvm_register_sysfs(struct nvme_ns *ns)
> {
> 	struct nvm_dev *ndev = ns->ndev;
> 	struct nvm_geo *geo = &ndev->geo;
> 
> 	if (!ndev)
> -		return -EINVAL;
> -
> -	switch (geo->major_ver_id) {
> -	case 1:
> -		return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvm_dev_attr_group_12);
> -	case 2:
> -		return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvm_dev_attr_group_20);
> -	}
> -
> -	return -EINVAL;
> -}
> -
> -void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
> -{
> -	struct nvm_dev *ndev = ns->ndev;
> -	struct nvm_geo *geo = &ndev->geo;
> +		return;
> 
> 	switch (geo->major_ver_id) {
> 	case 1:
> -		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvm_dev_attr_group_12);
> +		nvme_ns_id_attr_groups[1] = &nvm_dev_attr_group_12;
> 		break;
> 	case 2:
> -		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvm_dev_attr_group_20);
> +		nvme_ns_id_attr_groups[1] = &nvm_dev_attr_group_20;
> 		break;
> 	}
> }
> diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
> index 477af51d01e8..8e846095c42d 100644
> --- a/drivers/nvme/host/multipath.c
> +++ b/drivers/nvme/host/multipath.c
> @@ -282,13 +282,9 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
> 	if (!head->disk)
> 		return;
> 
> -	if (!(head->disk->flags & GENHD_FL_UP)) {
> -		device_add_disk(&head->subsys->dev, head->disk, NULL);
> -		if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
> -				&nvme_ns_id_attr_group))
> -			dev_warn(&head->subsys->dev,
> -				 "failed to create id group.\n");
> -	}
> +	if (!(head->disk->flags & GENHD_FL_UP))
> +		device_add_disk(&head->subsys->dev, head->disk,
> +				nvme_ns_id_attr_groups);
> 
> 	kblockd_schedule_work(&ns->head->requeue_work);
> }
> @@ -494,11 +490,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
> {
> 	if (!head->disk)
> 		return;
> -	if (head->disk->flags & GENHD_FL_UP) {
> -		sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
> -				   &nvme_ns_id_attr_group);
> +	if (head->disk->flags & GENHD_FL_UP)
> 		del_gendisk(head->disk);
> -	}
> 	blk_set_queue_dying(head->disk->queue);
> 	/* make sure all pending bios are cleaned up */
> 	kblockd_schedule_work(&head->requeue_work);
> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
> index bb4a2003c097..9ba6d67d8e0a 100644
> --- a/drivers/nvme/host/nvme.h
> +++ b/drivers/nvme/host/nvme.h
> @@ -459,7 +459,7 @@ int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
> int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
> 		void *log, size_t size, u64 offset);
> 
> -extern const struct attribute_group nvme_ns_id_attr_group;
> +extern const struct attribute_group *nvme_ns_id_attr_groups[];
> extern const struct block_device_operations nvme_ns_head_ops;
> 
> #ifdef CONFIG_NVME_MULTIPATH
> @@ -551,8 +551,7 @@ static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
> void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
> int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
> void nvme_nvm_unregister(struct nvme_ns *ns);
> -int nvme_nvm_register_sysfs(struct nvme_ns *ns);
> -void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
> +void nvme_nvm_register_sysfs(struct nvme_ns *ns);
> int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
> #else
> static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
> @@ -563,11 +562,7 @@ static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
> }
> 
> static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
> -static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
> -{
> -	return 0;
> -}
> -static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
> +static inline void nvme_nvm_register_sysfs(struct nvme_ns *ns) {};
> static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
> 							unsigned long arg)
> {
> --
> 2.12.3
> 
> 

The lightnvm part you added to V2 looks good. Thanks!


Reviewed-by: Javier González <javier@cnexlabs.com>
Matias Bjorling Aug. 14, 2018, 9:59 a.m. UTC | #2
On 08/14/2018 09:33 AM, Hannes Reinecke wrote:
> We should be registering the ns_id attribute as default sysfs
> attribute groups, otherwise we have a race condition between
> the uevent and the attributes appearing in sysfs.
> 
> Signed-off-by: Hannes Reinecke <hare@suse.com>
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/nvme/host/core.c      | 21 +++++++++------------
>   drivers/nvme/host/lightnvm.c  | 27 ++++-----------------------
>   drivers/nvme/host/multipath.c | 15 ++++-----------
>   drivers/nvme/host/nvme.h      | 11 +++--------
>   4 files changed, 20 insertions(+), 54 deletions(-)
> 
> diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
> index 0e824e8c8fd7..8e26d98e9a8f 100644
> --- a/drivers/nvme/host/core.c
> +++ b/drivers/nvme/host/core.c
> @@ -2734,6 +2734,12 @@ const struct attribute_group nvme_ns_id_attr_group = {
>   	.is_visible	= nvme_ns_id_attrs_are_visible,
>   };
>   
> +const struct attribute_group *nvme_ns_id_attr_groups[] = {
> +	&nvme_ns_id_attr_group,
> +	NULL, /* Will be filled in by lightnvm if present */
> +	NULL,
> +};
> +
>   #define nvme_show_str_function(field)						\
>   static ssize_t  field##_show(struct device *dev,				\
>   			    struct device_attribute *attr, char *buf)		\
> @@ -3099,14 +3105,9 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
>   
>   	nvme_get_ctrl(ctrl);
>   
> -	device_add_disk(ctrl->device, ns->disk, NULL);
> -	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvme_ns_id_attr_group))
> -		pr_warn("%s: failed to create sysfs group for identification\n",
> -			ns->disk->disk_name);
> -	if (ns->ndev && nvme_nvm_register_sysfs(ns))
> -		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
> -			ns->disk->disk_name);
> +	if (ns->ndev)
> +		nvme_nvm_register_sysfs(ns);
> +	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
>   
>   	nvme_mpath_add_disk(ns, id);
>   	nvme_fault_inject_init(ns);
> @@ -3132,10 +3133,6 @@ static void nvme_ns_remove(struct nvme_ns *ns)
>   
>   	nvme_fault_inject_fini(ns);
>   	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
> -		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvme_ns_id_attr_group);
> -		if (ns->ndev)
> -			nvme_nvm_unregister_sysfs(ns);
>   		del_gendisk(ns->disk);
>   		blk_cleanup_queue(ns->queue);
>   		if (blk_get_integrity(ns->disk))
> diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
> index 6fe5923c95d4..7bf2f9da6293 100644
> --- a/drivers/nvme/host/lightnvm.c
> +++ b/drivers/nvme/host/lightnvm.c
> @@ -1270,39 +1270,20 @@ static const struct attribute_group nvm_dev_attr_group_20 = {
>   	.attrs		= nvm_dev_attrs_20,
>   };
>   
> -int nvme_nvm_register_sysfs(struct nvme_ns *ns)
> +void nvme_nvm_register_sysfs(struct nvme_ns *ns)
>   {
>   	struct nvm_dev *ndev = ns->ndev;
>   	struct nvm_geo *geo = &ndev->geo;
>   
>   	if (!ndev)
> -		return -EINVAL;
> -
> -	switch (geo->major_ver_id) {
> -	case 1:
> -		return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvm_dev_attr_group_12);
> -	case 2:
> -		return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvm_dev_attr_group_20);
> -	}
> -
> -	return -EINVAL;
> -}
> -
> -void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
> -{
> -	struct nvm_dev *ndev = ns->ndev;
> -	struct nvm_geo *geo = &ndev->geo;
> +		return;
>   
>   	switch (geo->major_ver_id) {
>   	case 1:
> -		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvm_dev_attr_group_12);
> +		nvme_ns_id_attr_groups[1] = &nvm_dev_attr_group_12;
>   		break;
>   	case 2:
> -		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvm_dev_attr_group_20);
> +		nvme_ns_id_attr_groups[1] = &nvm_dev_attr_group_20;
>   		break;
>   	}
>   }
> diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
> index 477af51d01e8..8e846095c42d 100644
> --- a/drivers/nvme/host/multipath.c
> +++ b/drivers/nvme/host/multipath.c
> @@ -282,13 +282,9 @@ static void nvme_mpath_set_live(struct nvme_ns *ns)
>   	if (!head->disk)
>   		return;
>   
> -	if (!(head->disk->flags & GENHD_FL_UP)) {
> -		device_add_disk(&head->subsys->dev, head->disk, NULL);
> -		if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
> -				&nvme_ns_id_attr_group))
> -			dev_warn(&head->subsys->dev,
> -				 "failed to create id group.\n");
> -	}
> +	if (!(head->disk->flags & GENHD_FL_UP))
> +		device_add_disk(&head->subsys->dev, head->disk,
> +				nvme_ns_id_attr_groups);
>   
>   	kblockd_schedule_work(&ns->head->requeue_work);
>   }
> @@ -494,11 +490,8 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
>   {
>   	if (!head->disk)
>   		return;
> -	if (head->disk->flags & GENHD_FL_UP) {
> -		sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
> -				   &nvme_ns_id_attr_group);
> +	if (head->disk->flags & GENHD_FL_UP)
>   		del_gendisk(head->disk);
> -	}
>   	blk_set_queue_dying(head->disk->queue);
>   	/* make sure all pending bios are cleaned up */
>   	kblockd_schedule_work(&head->requeue_work);
> diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
> index bb4a2003c097..9ba6d67d8e0a 100644
> --- a/drivers/nvme/host/nvme.h
> +++ b/drivers/nvme/host/nvme.h
> @@ -459,7 +459,7 @@ int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
>   int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
>   		void *log, size_t size, u64 offset);
>   
> -extern const struct attribute_group nvme_ns_id_attr_group;
> +extern const struct attribute_group *nvme_ns_id_attr_groups[];
>   extern const struct block_device_operations nvme_ns_head_ops;
>   
>   #ifdef CONFIG_NVME_MULTIPATH
> @@ -551,8 +551,7 @@ static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
>   void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
>   int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
>   void nvme_nvm_unregister(struct nvme_ns *ns);
> -int nvme_nvm_register_sysfs(struct nvme_ns *ns);
> -void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
> +void nvme_nvm_register_sysfs(struct nvme_ns *ns);
>   int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
>   #else
>   static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
> @@ -563,11 +562,7 @@ static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
>   }
>   
>   static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
> -static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
> -{
> -	return 0;
> -}
> -static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
> +static inline void nvme_nvm_register_sysfs(struct nvme_ns *ns) {};
>   static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
>   							unsigned long arg)
>   {
> 

Thanks Hannes.

Acked-by: Matias Bjørling <mb@lightnvm.io>
Bart Van Assche Aug. 14, 2018, 3:20 p.m. UTC | #3
On Tue, 2018-08-14 at 09:33 +0200, Hannes Reinecke wrote:
> +const struct attribute_group *nvme_ns_id_attr_groups[] = {
> +	&nvme_ns_id_attr_group,
> +	NULL, /* Will be filled in by lightnvm if present */
> +	NULL,
> +};
[ ... ]
> -void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
> -{
> -	struct nvm_dev *ndev = ns->ndev;
> -	struct nvm_geo *geo = &ndev->geo;
> +		return;
>  
>  	switch (geo->major_ver_id) {
>  	case 1:
> -		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvm_dev_attr_group_12);
> +		nvme_ns_id_attr_groups[1] = &nvm_dev_attr_group_12;
>  		break;
>  	case 2:
> -		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
> -					&nvm_dev_attr_group_20);
> +		nvme_ns_id_attr_groups[1] = &nvm_dev_attr_group_20;

This patch introduces a really ugly dependency between the NVMe core code and
the lightnvm code, namely that the lightnvm code has to know at which position
in the nvme_ns_id_attr_groups it can fill in its attribute group pointer. Have
you considered to make nvme_nvm_register_sysfs() return an attribute group
pointer such that the nvme_ns_id_attr_groups can be changed from a global into
a static array?

Thanks,

Bart.
Hannes Reinecke Aug. 14, 2018, 3:39 p.m. UTC | #4
On 08/14/2018 05:20 PM, Bart Van Assche wrote:
> On Tue, 2018-08-14 at 09:33 +0200, Hannes Reinecke wrote:
>> +const struct attribute_group *nvme_ns_id_attr_groups[] = {
>> +	&nvme_ns_id_attr_group,
>> +	NULL, /* Will be filled in by lightnvm if present */
>> +	NULL,
>> +};
> [ ... ]
>> -void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
>> -{
>> -	struct nvm_dev *ndev = ns->ndev;
>> -	struct nvm_geo *geo = &ndev->geo;
>> +		return;
>>  
>>  	switch (geo->major_ver_id) {
>>  	case 1:
>> -		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
>> -					&nvm_dev_attr_group_12);
>> +		nvme_ns_id_attr_groups[1] = &nvm_dev_attr_group_12;
>>  		break;
>>  	case 2:
>> -		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
>> -					&nvm_dev_attr_group_20);
>> +		nvme_ns_id_attr_groups[1] = &nvm_dev_attr_group_20;
> 
> This patch introduces a really ugly dependency between the NVMe core code and
> the lightnvm code, namely that the lightnvm code has to know at which position
> in the nvme_ns_id_attr_groups it can fill in its attribute group pointer. Have
> you considered to make nvme_nvm_register_sysfs() return an attribute group
> pointer such that the nvme_ns_id_attr_groups can be changed from a global into
> a static array?
> 
Wouldn't help much, as the 'nvme_ns_id_attr_groups' needs to be a global
pointer anyway as it's referenced from drivers/nvme/host/core.c and
drivers/nvme/host/multipath.c.

While I have considered having nvme_nvm_register_sysfs() returning a
pointer I would then have to remove the 'static' declaration from the
nvm_dev_attr_group_12/20.
Which I didn't really like, either.

Cheers,

Hannes
Bart Van Assche Aug. 14, 2018, 3:44 p.m. UTC | #5
On Tue, 2018-08-14 at 17:39 +0200, Hannes Reinecke wrote:
> While I have considered having nvme_nvm_register_sysfs() returning a
> pointer I would then have to remove the 'static' declaration from the
> nvm_dev_attr_group_12/20.
> Which I didn't really like, either.

Hmm ... I don't see why the static declaration would have to be removed from
nvm_dev_attr_group_12/20 if nvme_nvm_register_sysfs() would return a pointer?
Am I perhaps missing something?

Thanks,

Bart.
kernel test robot Aug. 14, 2018, 9:53 p.m. UTC | #6
Hi Hannes,

I love your patch! Perhaps something to improve:

[auto build test WARNING on block/for-next]
[also build test WARNING on next-20180814]
[cannot apply to v4.18]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Hannes-Reinecke/genhd-register-default-groups-with-device_add_disk/20180815-034942
base:   https://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git for-next
reproduce:
        # apt-get install sparse
        make ARCH=x86_64 allmodconfig
        make C=1 CF=-D__CHECK_ENDIAN__


sparse warnings: (new ones prefixed by >>)

   include/uapi/linux/perf_event.h:147:56: sparse: cast truncates bits from constant value (8000000000000000 becomes 0)
   drivers/nvme/host/core.c:506:28: sparse: expression using sizeof(void)
   include/linux/blkdev.h:1269:16: sparse: expression using sizeof(void)
   include/linux/slab.h:631:13: sparse: undefined identifier '__builtin_mul_overflow'
   drivers/nvme/host/core.c:1062:32: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:1062:32: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:1063:26: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:1063:26: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:1844:32: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:1844:32: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:1846:43: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2406:17: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2406:17: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2417:42: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2417:42: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2417:42: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2417:42: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2417:42: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2417:42: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2417:42: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2417:42: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2417:42: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2417:42: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2417:42: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2417:42: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:2417:42: sparse: expression using sizeof(void)
>> drivers/nvme/host/core.c:2732:30: sparse: symbol 'nvme_ns_id_attr_group' was not declared. Should it be static?
   drivers/nvme/host/core.c:3203:33: sparse: expression using sizeof(void)
   drivers/nvme/host/core.c:3566:17: sparse: expression using sizeof(void)
   include/linux/slab.h:631:13: sparse: call with no type!
   drivers/nvme/host/core.c:1264:23: sparse: context imbalance in 'nvme_get_ns_from_disk' - wrong count at exit
   drivers/nvme/host/core.c:1282:33: sparse: context imbalance in 'nvme_put_ns_from_disk' - unexpected unlock

Please review and possibly fold the followup patch.

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation
Christoph Hellwig Aug. 17, 2018, 6:55 a.m. UTC | #7
On Tue, Aug 14, 2018 at 09:33:02AM +0200, Hannes Reinecke wrote:
> We should be registering the ns_id attribute as default sysfs
> attribute groups, otherwise we have a race condition between
> the uevent and the attributes appearing in sysfs.
> 
> Signed-off-by: Hannes Reinecke <hare@suse.com>
> Reviewed-by: Christoph Hellwig <hch@lst.de>

I only reviewed the original patch without the lightnvm additions.
Please drop reviewed-by tags if you make non-trivial changes.

In this case I also wonder if you should have just kept the original
patch as-is and added a new one for lightnvm.
Christoph Hellwig Aug. 17, 2018, 7 a.m. UTC | #8
On Tue, Aug 14, 2018 at 03:44:57PM +0000, Bart Van Assche wrote:
> On Tue, 2018-08-14 at 17:39 +0200, Hannes Reinecke wrote:
> > While I have considered having nvme_nvm_register_sysfs() returning a
> > pointer I would then have to remove the 'static' declaration from the
> > nvm_dev_attr_group_12/20.
> > Which I didn't really like, either.
> 
> Hmm ... I don't see why the static declaration would have to be removed from
> nvm_dev_attr_group_12/20 if nvme_nvm_register_sysfs() would return a pointer?
> Am I perhaps missing something?

No, I think that would be the preferable approach IFF patching the global
table of groups would be viable.  I don't think it is, though - we can
have both normal NVMe and LightNVM devices in the same system, so we
can't just patch it over.

So we'll need three different attribute group arrays:

const struct attribute_group *nvme_ns_id_attr_groups[] = {
	&nvme_ns_id_attr_group,
	NULL,
};

const struct attribute_group *lightnvm12_ns_id_attr_groups[] = {
	&nvme_ns_id_attr_group,
	&nvm_dev_attr_group_12,
	NULL,
};

const struct attribute_group *lightnvm20_ns_id_attr_groups[] = {
	&nvme_ns_id_attr_group,
	&nvm_dev_attr_group_20,
	NULL,
};

and a function to select which one to use.
Hannes Reinecke Aug. 17, 2018, 7:53 a.m. UTC | #9
On 08/17/2018 09:00 AM, hch@lst.de wrote:
> On Tue, Aug 14, 2018 at 03:44:57PM +0000, Bart Van Assche wrote:
>> On Tue, 2018-08-14 at 17:39 +0200, Hannes Reinecke wrote:
>>> While I have considered having nvme_nvm_register_sysfs() returning a
>>> pointer I would then have to remove the 'static' declaration from the
>>> nvm_dev_attr_group_12/20.
>>> Which I didn't really like, either.
>>
>> Hmm ... I don't see why the static declaration would have to be removed from
>> nvm_dev_attr_group_12/20 if nvme_nvm_register_sysfs() would return a pointer?
>> Am I perhaps missing something?
> 
> No, I think that would be the preferable approach IFF patching the global
> table of groups would be viable.  I don't think it is, though - we can
> have both normal NVMe and LightNVM devices in the same system, so we
> can't just patch it over.
> 
> So we'll need three different attribut group arrays:
> 
> const struct attribute_group *nvme_ns_id_attr_groups[] = {
> 	&nvme_ns_id_attr_group,
> 	NULL,
> };
> 
> const struct attribute_group *lightnvm12_ns_id_attr_groups[] = {
> 	&nvme_ns_id_attr_group,
> 	&nvm_dev_attr_group_12,
> 	NULL,
> };
> 
> const struct attribute_group *lightnvm20_ns_id_attr_groups[] = {
> 	&nvme_ns_id_attr_group,
> 	&nvm_dev_attr_group_20,
> 	NULL,
> };
> 
> and a function to select which one to use.
> 
Yeah, I figured the same thing.
I'll be redoing the patchset.

Cheers,

Hannes
Bart Van Assche Aug. 17, 2018, 8:04 p.m. UTC | #10
On Fri, 2018-08-17 at 09:00 +0200, hch@lst.de wrote:
> On Tue, Aug 14, 2018 at 03:44:57PM +0000, Bart Van Assche wrote:
> > On Tue, 2018-08-14 at 17:39 +0200, Hannes Reinecke wrote:
> > > While I have considered having nvme_nvm_register_sysfs() returning a
> > > pointer I would then have to remove the 'static' declaration from the
> > > nvm_dev_attr_group_12/20.
> > > Which I didn't really like, either.
> > 
> > Hmm ... I don't see why the static declaration would have to be removed from
> > nvm_dev_attr_group_12/20 if nvme_nvm_register_sysfs() would return a pointer?
> > Am I perhaps missing something?
> 
> No, I think that would be the preferable approach IFF patching the global
> table of groups would be viable.  I don't think it is, though - we can
> have both normal NVMe and LightNVM devices in the same system, so we
> can't just patch it over.
> 
> So we'll need three different attribut group arrays:
> 
> const struct attribute_group *nvme_ns_id_attr_groups[] = {
> 	&nvme_ns_id_attr_group,
> 	NULL,
> };
> 
> const struct attribute_group *lightnvm12_ns_id_attr_groups[] = {
> 	&nvme_ns_id_attr_group,
> 	&nvm_dev_attr_group_12,
> 	NULL,
> };
> 
> const struct attribute_group *lightnvm20_ns_id_attr_groups[] = {
> 	&nvme_ns_id_attr_group,
> 	&nvm_dev_attr_group_20,
> 	NULL,
> };
> 
> and a function to select which one to use.

Hello Christoph,

How about applying the patch below on top of Hannes' patch? The patch below
has the advantage that it completely separates the open channel sysfs
attributes from the NVMe core sysfs attributes - the open channel code
doesn't have to know anything about the NVMe core sysfs attributes and the
NVMe core does not have to know anything about the open channel sysfs
attributes.

Thanks,

Bart.


diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 8e26d98e9a8f..e0a9e1c5b30e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2736,7 +2736,9 @@ const struct attribute_group nvme_ns_id_attr_group = {
 
 const struct attribute_group *nvme_ns_id_attr_groups[] = {
 	&nvme_ns_id_attr_group,
-	NULL, /* Will be filled in by lightnvm if present */
+#ifdef CONFIG_NVM
+	&nvme_nvm_attr_group,
+#endif
 	NULL,
 };
 
@@ -3105,8 +3107,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 	nvme_get_ctrl(ctrl);
 
-	if (ns->ndev)
-		nvme_nvm_register_sysfs(ns);
 	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
 
 	nvme_mpath_add_disk(ns, id);
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 7bf2f9da6293..b7b19d3561a4 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -1190,10 +1190,29 @@ static NVM_DEV_ATTR_12_RO(multiplane_modes);
 static NVM_DEV_ATTR_12_RO(media_capabilities);
 static NVM_DEV_ATTR_12_RO(max_phys_secs);
 
-static struct attribute *nvm_dev_attrs_12[] = {
+/* 2.0 values */
+static NVM_DEV_ATTR_20_RO(groups);
+static NVM_DEV_ATTR_20_RO(punits);
+static NVM_DEV_ATTR_20_RO(chunks);
+static NVM_DEV_ATTR_20_RO(clba);
+static NVM_DEV_ATTR_20_RO(ws_min);
+static NVM_DEV_ATTR_20_RO(ws_opt);
+static NVM_DEV_ATTR_20_RO(maxoc);
+static NVM_DEV_ATTR_20_RO(maxocpu);
+static NVM_DEV_ATTR_20_RO(mw_cunits);
+static NVM_DEV_ATTR_20_RO(write_typ);
+static NVM_DEV_ATTR_20_RO(write_max);
+static NVM_DEV_ATTR_20_RO(reset_typ);
+static NVM_DEV_ATTR_20_RO(reset_max);
+
+static struct attribute *nvm_dev_attrs[] = {
+	/* version agnostic attrs */
 	&dev_attr_version.attr,
 	&dev_attr_capabilities.attr,
+	&dev_attr_read_typ.attr,
+	&dev_attr_read_max.attr,
 
+	/* 1.2 attrs */
 	&dev_attr_vendor_opcode.attr,
 	&dev_attr_device_mode.attr,
 	&dev_attr_media_manager.attr,
@@ -1208,8 +1227,6 @@ static struct attribute *nvm_dev_attrs_12[] = {
 	&dev_attr_page_size.attr,
 	&dev_attr_hw_sector_size.attr,
 	&dev_attr_oob_sector_size.attr,
-	&dev_attr_read_typ.attr,
-	&dev_attr_read_max.attr,
 	&dev_attr_prog_typ.attr,
 	&dev_attr_prog_max.attr,
 	&dev_attr_erase_typ.attr,
@@ -1218,33 +1235,7 @@ static struct attribute *nvm_dev_attrs_12[] = {
 	&dev_attr_media_capabilities.attr,
 	&dev_attr_max_phys_secs.attr,
 
-	NULL,
-};
-
-static const struct attribute_group nvm_dev_attr_group_12 = {
-	.name		= "lightnvm",
-	.attrs		= nvm_dev_attrs_12,
-};
-
-/* 2.0 values */
-static NVM_DEV_ATTR_20_RO(groups);
-static NVM_DEV_ATTR_20_RO(punits);
-static NVM_DEV_ATTR_20_RO(chunks);
-static NVM_DEV_ATTR_20_RO(clba);
-static NVM_DEV_ATTR_20_RO(ws_min);
-static NVM_DEV_ATTR_20_RO(ws_opt);
-static NVM_DEV_ATTR_20_RO(maxoc);
-static NVM_DEV_ATTR_20_RO(maxocpu);
-static NVM_DEV_ATTR_20_RO(mw_cunits);
-static NVM_DEV_ATTR_20_RO(write_typ);
-static NVM_DEV_ATTR_20_RO(write_max);
-static NVM_DEV_ATTR_20_RO(reset_typ);
-static NVM_DEV_ATTR_20_RO(reset_max);
-
-static struct attribute *nvm_dev_attrs_20[] = {
-	&dev_attr_version.attr,
-	&dev_attr_capabilities.attr,
-
+	/* 2.0 attrs */
 	&dev_attr_groups.attr,
 	&dev_attr_punits.attr,
 	&dev_attr_chunks.attr,
@@ -1255,8 +1246,6 @@ static struct attribute *nvm_dev_attrs_20[] = {
 	&dev_attr_maxocpu.attr,
 	&dev_attr_mw_cunits.attr,
 
-	&dev_attr_read_typ.attr,
-	&dev_attr_read_max.attr,
 	&dev_attr_write_typ.attr,
 	&dev_attr_write_max.attr,
 	&dev_attr_reset_typ.attr,
@@ -1265,25 +1254,35 @@ static struct attribute *nvm_dev_attrs_20[] = {
 	NULL,
 };
 
-static const struct attribute_group nvm_dev_attr_group_20 = {
-	.name		= "lightnvm",
-	.attrs		= nvm_dev_attrs_20,
-};
-
-void nvme_nvm_register_sysfs(struct nvme_ns *ns)
+static umode_t nvm_dev_attrs_visible(struct kobject *kobj,
+				     struct attribute *attr, int index)
 {
+	struct device *dev = container_of(kobj, struct device, kobj);
+	struct gendisk *disk = dev_to_disk(dev);
+	struct nvme_ns *ns = disk->private_data;
 	struct nvm_dev *ndev = ns->ndev;
-	struct nvm_geo *geo = &ndev->geo;
+	struct device_attribute *dev_attr =
+		container_of(attr, typeof(*dev_attr), attr);
 
-	if (!ndev)
-		return;
+	if (dev_attr->show == nvm_dev_attr_show)
+		return attr->mode;
 
-	switch (geo->major_ver_id) {
+	switch (ndev ? ndev->geo.major_ver_id : 0) {
 	case 1:
-		nvme_ns_id_attr_groups[1] = &nvm_dev_attr_group_12;
+		if (dev_attr->show == nvm_dev_attr_show_12)
+			return attr->mode;
 		break;
 	case 2:
-		nvme_ns_id_attr_groups[1] = &nvm_dev_attr_group_20;
+		if (dev_attr->show == nvm_dev_attr_show_20)
+			return attr->mode;
 		break;
 	}
+
+	return 0;
 }
+
+const struct attribute_group nvm_dev_attr_group = {
+	.name		= "lightnvm",
+	.attrs		= nvm_dev_attrs,
+	.is_visible	= nvm_dev_attrs_visible,
+};
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9ba6d67d8e0a..2503f8fd54da 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -551,7 +551,7 @@ static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
 void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
 int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
 void nvme_nvm_unregister(struct nvme_ns *ns);
-void nvme_nvm_register_sysfs(struct nvme_ns *ns);
+extern const struct attribute_group nvme_nvm_attr_group;
 int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
 #else
 static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
@@ -562,7 +562,6 @@ static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
 }
 
 static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
-static inline void nvme_nvm_register_sysfs(struct nvme_ns *ns) {};
 static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
 							unsigned long arg)
 {
Sagi Grimberg Aug. 17, 2018, 10:47 p.m. UTC | #11
> Hello Christoph,
> 
> How about applying the patch below on top of Hannes' patch? The patch below
> has the advantage that it completely separates the open channel sysfs
> attributes from the NVMe core sysfs attributes - the open channel code
> doesn't have to know anything about the NVMe core sysfs attributes and the
> NVMe core does not have to know anything about the open channel sysfs
> attributes.

This looks better to me Bart (unless I'm missing something)
Hannes Reinecke Aug. 20, 2018, 6:34 a.m. UTC | #12
On 08/17/2018 10:04 PM, Bart Van Assche wrote:
> On Fri, 2018-08-17 at 09:00 +0200, hch@lst.de wrote:
>> On Tue, Aug 14, 2018 at 03:44:57PM +0000, Bart Van Assche wrote:
>>> On Tue, 2018-08-14 at 17:39 +0200, Hannes Reinecke wrote:
>>>> While I have considered having nvme_nvm_register_sysfs() returning a
>>>> pointer I would then have to remove the 'static' declaration from the
>>>> nvm_dev_attr_group_12/20.
>>>> Which I didn't really like, either.
>>>
>>> Hmm ... I don't see why the static declaration would have to be removed from
>>> nvm_dev_attr_group_12/20 if nvme_nvm_register_sysfs() would return a pointer?
>>> Am I perhaps missing something?
>>
>> No, I think that would be the preferable approach IFF patching the global
>> table of groups would be viable.  I don't think it is, though - we can
>> have both normal NVMe and LightNVM devices in the same system, so we
>> can't just patch it over.
>>
>> So we'll need three different attribute group arrays:
>>
>> const struct attribute_group *nvme_ns_id_attr_groups[] = {
>> 	&nvme_ns_id_attr_group,
>> 	NULL,
>> };
>>
>> const struct attribute_group *lightnvm12_ns_id_attr_groups[] = {
>> 	&nvme_ns_id_attr_group,
>> 	&nvm_dev_attr_group_12,
>> 	NULL,
>> };
>>
>> const struct attribute_group *lightnvm20_ns_id_attr_groups[] = {
>> 	&nvme_ns_id_attr_group,
>> 	&nvm_dev_attr_group_20,
>> 	NULL,
>> };
>>
>> and a function to select which one to use.
> 
> Hello Christoph,
> 
> How about applying the patch below on top of Hannes' patch? The patch below
> has the advantage that it completely separates the open channel sysfs
> attributes from the NVMe core sysfs attributes - the open channel code
> doesn't have to know anything about the NVMe core sysfs attributes and the
> NVMe core does not have to know anything about the open channel sysfs
> attributes.
> 
Yes, this looks like the best approach.
I'll fold it into my patchset.

Cheers,

Hannes
diff mbox series

Patch

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 0e824e8c8fd7..8e26d98e9a8f 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2734,6 +2734,12 @@  const struct attribute_group nvme_ns_id_attr_group = {
 	.is_visible	= nvme_ns_id_attrs_are_visible,
 };
 
+const struct attribute_group *nvme_ns_id_attr_groups[] = {
+	&nvme_ns_id_attr_group,
+	NULL, /* Will be filled in by lightnvm if present */
+	NULL,
+};
+
 #define nvme_show_str_function(field)						\
 static ssize_t  field##_show(struct device *dev,				\
 			    struct device_attribute *attr, char *buf)		\
@@ -3099,14 +3105,9 @@  static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 
 	nvme_get_ctrl(ctrl);
 
-	device_add_disk(ctrl->device, ns->disk, NULL);
-	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
-					&nvme_ns_id_attr_group))
-		pr_warn("%s: failed to create sysfs group for identification\n",
-			ns->disk->disk_name);
-	if (ns->ndev && nvme_nvm_register_sysfs(ns))
-		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
-			ns->disk->disk_name);
+	if (ns->ndev)
+		nvme_nvm_register_sysfs(ns);
+	device_add_disk(ctrl->device, ns->disk, nvme_ns_id_attr_groups);
 
 	nvme_mpath_add_disk(ns, id);
 	nvme_fault_inject_init(ns);
@@ -3132,10 +3133,6 @@  static void nvme_ns_remove(struct nvme_ns *ns)
 
 	nvme_fault_inject_fini(ns);
 	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
-		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
-					&nvme_ns_id_attr_group);
-		if (ns->ndev)
-			nvme_nvm_unregister_sysfs(ns);
 		del_gendisk(ns->disk);
 		blk_cleanup_queue(ns->queue);
 		if (blk_get_integrity(ns->disk))
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 6fe5923c95d4..7bf2f9da6293 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -1270,39 +1270,20 @@  static const struct attribute_group nvm_dev_attr_group_20 = {
 	.attrs		= nvm_dev_attrs_20,
 };
 
-int nvme_nvm_register_sysfs(struct nvme_ns *ns)
+void nvme_nvm_register_sysfs(struct nvme_ns *ns)
 {
 	struct nvm_dev *ndev = ns->ndev;
 	struct nvm_geo *geo = &ndev->geo;
 
 	if (!ndev)
-		return -EINVAL;
-
-	switch (geo->major_ver_id) {
-	case 1:
-		return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
-					&nvm_dev_attr_group_12);
-	case 2:
-		return sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
-					&nvm_dev_attr_group_20);
-	}
-
-	return -EINVAL;
-}
-
-void nvme_nvm_unregister_sysfs(struct nvme_ns *ns)
-{
-	struct nvm_dev *ndev = ns->ndev;
-	struct nvm_geo *geo = &ndev->geo;
+		return;
 
 	switch (geo->major_ver_id) {
 	case 1:
-		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
-					&nvm_dev_attr_group_12);
+		nvme_ns_id_attr_groups[1] = &nvm_dev_attr_group_12;
 		break;
 	case 2:
-		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
-					&nvm_dev_attr_group_20);
+		nvme_ns_id_attr_groups[1] = &nvm_dev_attr_group_20;
 		break;
 	}
 }
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 477af51d01e8..8e846095c42d 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -282,13 +282,9 @@  static void nvme_mpath_set_live(struct nvme_ns *ns)
 	if (!head->disk)
 		return;
 
-	if (!(head->disk->flags & GENHD_FL_UP)) {
-		device_add_disk(&head->subsys->dev, head->disk, NULL);
-		if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
-				&nvme_ns_id_attr_group))
-			dev_warn(&head->subsys->dev,
-				 "failed to create id group.\n");
-	}
+	if (!(head->disk->flags & GENHD_FL_UP))
+		device_add_disk(&head->subsys->dev, head->disk,
+				nvme_ns_id_attr_groups);
 
 	kblockd_schedule_work(&ns->head->requeue_work);
 }
@@ -494,11 +490,8 @@  void nvme_mpath_remove_disk(struct nvme_ns_head *head)
 {
 	if (!head->disk)
 		return;
-	if (head->disk->flags & GENHD_FL_UP) {
-		sysfs_remove_group(&disk_to_dev(head->disk)->kobj,
-				   &nvme_ns_id_attr_group);
+	if (head->disk->flags & GENHD_FL_UP)
 		del_gendisk(head->disk);
-	}
 	blk_set_queue_dying(head->disk->queue);
 	/* make sure all pending bios are cleaned up */
 	kblockd_schedule_work(&head->requeue_work);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index bb4a2003c097..9ba6d67d8e0a 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -459,7 +459,7 @@  int nvme_delete_ctrl_sync(struct nvme_ctrl *ctrl);
 int nvme_get_log(struct nvme_ctrl *ctrl, u32 nsid, u8 log_page, u8 lsp,
 		void *log, size_t size, u64 offset);
 
-extern const struct attribute_group nvme_ns_id_attr_group;
+extern const struct attribute_group *nvme_ns_id_attr_groups[];
 extern const struct block_device_operations nvme_ns_head_ops;
 
 #ifdef CONFIG_NVME_MULTIPATH
@@ -551,8 +551,7 @@  static inline void nvme_mpath_stop(struct nvme_ctrl *ctrl)
 void nvme_nvm_update_nvm_info(struct nvme_ns *ns);
 int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node);
 void nvme_nvm_unregister(struct nvme_ns *ns);
-int nvme_nvm_register_sysfs(struct nvme_ns *ns);
-void nvme_nvm_unregister_sysfs(struct nvme_ns *ns);
+void nvme_nvm_register_sysfs(struct nvme_ns *ns);
 int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd, unsigned long arg);
 #else
 static inline void nvme_nvm_update_nvm_info(struct nvme_ns *ns) {};
@@ -563,11 +562,7 @@  static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name,
 }
 
 static inline void nvme_nvm_unregister(struct nvme_ns *ns) {};
-static inline int nvme_nvm_register_sysfs(struct nvme_ns *ns)
-{
-	return 0;
-}
-static inline void nvme_nvm_unregister_sysfs(struct nvme_ns *ns) {};
+static inline void nvme_nvm_register_sysfs(struct nvme_ns *ns) {};
 static inline int nvme_nvm_ioctl(struct nvme_ns *ns, unsigned int cmd,
 							unsigned long arg)
 {