
[RFC] lightnvm: expose configuration through sysfs

Message ID 1461777537-8145-2-git-send-email-slund@cnexlabs.com (mailing list archive)
State New, archived

Commit Message

Simon A. F. Lund April 27, 2016, 5:18 p.m. UTC
An open-channel SSD exposes its geometry to the host. Knowing the boundaries
of the LUNs, flash blocks, and flash pages enables the host to write to its
physical media.

The configuration information is kept within the kernel and is not exported to
user-space for consumption. This patch exposes the configuration through sysfs
and enables user-space libraries, such as liblightnvm, to use the sysfs
implementation to get the geometry of an open-channel SSD.

The configuration looks like this:

/sys/devices/virtual/misc/lightnvm
├── devices
│   └── nvme0n1
│       ├── capabilities
│       ├── device_mode
│       ├── grp0
│       │   ├── channel_parallelism
│       │   ├── erase_max
│       │   ├── erase_typ
│       │   ├── flash_media_type
│       │   ├── media_capabilities
│       │   ├── media_type
│       │   ├── multiplane
│       │   ├── num_blocks
│       │   ├── num_channels
│       │   ├── num_luns
│       │   ├── num_pages
│       │   ├── num_planes
│       │   ├── page_size
│       │   ├── prog_max
│       │   ├── prog_typ
│       │   ├── read_max
│       │   ├── read_typ
│       │   ├── sector_oob_size
│       │   └── sector_size
│       ├── media_manager
│       ├── num_groups
│       ├── ppa_format
│       ├── vendor_opcode
│       └── version
└── targets
    └── tgt0
        └── type

With sample values from qemu instance:

/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/capabilities: 3
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/device_mode: 1
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/channel_parallelism: 0
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/erase_max: 1000000
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/erase_typ: 1000000
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/flash_media_type: 0
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/media_capabilities: 0x00000001
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/media_type: 0
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/multiplane: 0x00010101
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/num_blocks: 1022
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/num_channels: 1
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/num_luns: 4
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/num_pages: 64
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/num_planes: 1
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/page_size: 4096
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/prog_max: 100000
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/prog_typ: 100000
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/read_max: 10000
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/read_typ: 10000
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/sector_oob_size: 0
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/sector_size: 4096
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/media_manager: gennvm
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/num_groups: 1
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/ppa_format: 0x380830082808001010102008
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/vendor_opcode: 0
/sys/devices/virtual/misc/lightnvm/devices/nvme0n1/version: 1
/sys/devices/virtual/misc/lightnvm/targets/tgt0/type: rrpc
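
As an illustration of how a user-space consumer, e.g. liblightnvm or the
lnvm tool, might read these attributes: a minimal sketch in plain C. It is
not part of the patch; only the sysfs paths listed above are taken from it,
and the helper name is made up.

/* Sketch: read a single geometry attribute from the proposed sysfs layout */
#include <stdio.h>
#include <stdlib.h>

static long nvm_sysfs_read_attr(const char *dev, const char *grp,
				const char *attr)
{
	char path[256];
	char buf[64];
	FILE *f;

	/* e.g. /sys/devices/virtual/misc/lightnvm/devices/nvme0n1/grp0/num_luns */
	snprintf(path, sizeof(path),
		 "/sys/devices/virtual/misc/lightnvm/devices/%s/%s/%s",
		 dev, grp, attr);

	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return -1;
	}
	fclose(f);

	return strtol(buf, NULL, 0);	/* base 0 also handles the hex attrs */
}

int main(void)
{
	printf("num_luns: %ld\n",
	       nvm_sysfs_read_attr("nvme0n1", "grp0", "num_luns"));
	printf("page_size: %ld\n",
	       nvm_sysfs_read_attr("nvme0n1", "grp0", "page_size"));
	return 0;
}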

Signed-off-by: Simon A. F. Lund <slund@cnexlabs.com>
---
 Documentation/ABI/testing/sysfs-lightnvm | 244 ++++++++++++++++++
 drivers/lightnvm/Makefile                |   2 +-
 drivers/lightnvm/core.c                  |  33 ++-
 drivers/lightnvm/sysfs.c                 | 418 +++++++++++++++++++++++++++++++
 drivers/lightnvm/sysfs.h                 |  18 ++
 include/linux/lightnvm.h                 |   4 +
 6 files changed, 712 insertions(+), 7 deletions(-)
 create mode 100644 Documentation/ABI/testing/sysfs-lightnvm
 create mode 100644 drivers/lightnvm/sysfs.c
 create mode 100644 drivers/lightnvm/sysfs.h

Comments

Matias Bjorling April 27, 2016, 6:20 p.m. UTC | #1
On 04/27/2016 07:41 PM, Greg KH wrote:
> On Wed, Apr 27, 2016 at 10:18:57AM -0700, Simon A. F. Lund wrote:
>> --- a/include/linux/lightnvm.h
>> +++ b/include/linux/lightnvm.h
>> @@ -174,6 +174,7 @@ struct nvm_id_group {
>>   	u16	cpar;
>>
>>   	struct nvm_id_lp_tbl lptbl;
>> +	struct kobject kobj;
>>   };
>>
>>   struct nvm_addr_format {
>> @@ -205,6 +206,7 @@ struct nvm_target {
>>   	struct list_head list;
>>   	struct nvm_tgt_type *type;
>>   	struct gendisk *disk;
>> +	struct kobject kobj;
>>   };
>>
>>   struct nvm_tgt_instance {
>> @@ -360,6 +362,8 @@ struct nvm_dev {
>>
>>   	struct mutex mlock;
>>   	spinlock_t lock;
>> +
>> +	struct kobject kobj;
>>   };
>>
>>   static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
>
> Never use "raw" kobjects in a driver for a device.  You just guaranteed
> that userspace tools will not see these devices or attributes, which
> implies you didn't really test this using libudev :(
>
> Please use real devices, attached to the real devices your disks already
> have in the tree.
>
> And are you sure you didn't just mess up your reference counting by
> now having the lifecycle of these structures be dictated by the kobject?
>
> thanks,
>
> greg k-h
>

Hi Greg,

Thanks for the feedback.

lightnvm doesn't have anything to hook up to under /dev/block/ until a 
device is exposed through a target. A device goes into a staging area and 
is later configured to expose a block device.

In the case of the NVMe device driver, the driver brings up a device, 
identifies it as a lightnvm device, then calls nvm_register and registers 
the device. It skips the registration as a block device.

At the nvm_register point, the user can list the available devices 
through an ioctl, and then choose a target to put on top. The target 
will then expose it as a block device.

This might not be the ideal way. I'd like your input on what would be the 
proper way to expose such a device.

-Matias
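
For illustration, a minimal sketch of that flow seen from the driver side,
assuming the nvm_register(q, disk_name, ops) signature visible in the core.c
hunk of this patch. The driver function and the my_dev_ops table are
hypothetical placeholders, not part of lightnvm or of this patch.

#include <linux/blkdev.h>
#include <linux/lightnvm.h>

static struct nvm_dev_ops my_dev_ops;	/* identity/IO callbacks go here */

static int my_driver_register_ns(struct request_queue *q, char *disk_name,
				 bool is_lightnvm)
{
	if (is_lightnvm)
		/* Hand the device to lightnvm's staging area; a block
		 * device only appears once a target is created on top. */
		return nvm_register(q, disk_name, &my_dev_ops);

	/* Otherwise the usual gendisk/add_disk() path would follow. */
	return 0;
}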
Greg KH April 27, 2016, 7 p.m. UTC | #2
On Wed, Apr 27, 2016 at 08:20:33PM +0200, Matias Bjørling wrote:
> 
> 
> On 04/27/2016 07:41 PM, Greg KH wrote:
> > On Wed, Apr 27, 2016 at 10:18:57AM -0700, Simon A. F. Lund wrote:
> > > --- a/include/linux/lightnvm.h
> > > +++ b/include/linux/lightnvm.h
> > > @@ -174,6 +174,7 @@ struct nvm_id_group {
> > >   	u16	cpar;
> > > 
> > >   	struct nvm_id_lp_tbl lptbl;
> > > +	struct kobject kobj;
> > >   };
> > > 
> > >   struct nvm_addr_format {
> > > @@ -205,6 +206,7 @@ struct nvm_target {
> > >   	struct list_head list;
> > >   	struct nvm_tgt_type *type;
> > >   	struct gendisk *disk;
> > > +	struct kobject kobj;
> > >   };
> > > 
> > >   struct nvm_tgt_instance {
> > > @@ -360,6 +362,8 @@ struct nvm_dev {
> > > 
> > >   	struct mutex mlock;
> > >   	spinlock_t lock;
> > > +
> > > +	struct kobject kobj;
> > >   };
> > > 
> > >   static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
> > 
> > Never use "raw" kobjects in a driver for a device.  You just guaranteed
> > that userspace tools will not see these devices or attributes, which
> > implies you didn't really test this using libudev :(
> > 
> > Please use real devices, attached to the real devices your disks already
> > have in the tree.
> > 
> > And are you sure you didn't just mess up your reference counting by
> > now having the lifecycle of these structures be dictated by the kobject?
> > 
> > thanks,
> > 
> > greg k-h
> > 
> 
> Hi Greg,
> 
> Thanks for the feedback.
> 
> lightnvm doesn't have anything to hook up with in the /dev/block/* until a
> device is exposed through a target. A device goes into a staging area, and
> then later is configured to expose a block device.
> 
> In the case of NVMe device driver, the driver brings up a device, identifies
> it as a lightnvm device, then calls nvm_register and registers the device.
> It skips the registration as a block device.

But you could register it with sysfs at this point in time, giving you
a place in the device tree.  Which would be good.

> At the nvm_register point, the user can list the available devices through
> an ioctl, and then choose a target to put on top. The target will then
> expose it as a block device.

Then move the device at this point in time.

> This might not be the ideal way. I like your input on what would be the
> proper way to expose such a device.

See above.

thanks,

greg k-h
Matias Bjorling April 27, 2016, 7:26 p.m. UTC | #3
On 04/27/2016 09:00 PM, Greg KH wrote:
> On Wed, Apr 27, 2016 at 08:20:33PM +0200, Matias Bjørling wrote:
>>
>>
>> On 04/27/2016 07:41 PM, Greg KH wrote:
>>> On Wed, Apr 27, 2016 at 10:18:57AM -0700, Simon A. F. Lund wrote:
>>>> --- a/include/linux/lightnvm.h
>>>> +++ b/include/linux/lightnvm.h
>>>> @@ -174,6 +174,7 @@ struct nvm_id_group {
>>>>    	u16	cpar;
>>>>
>>>>    	struct nvm_id_lp_tbl lptbl;
>>>> +	struct kobject kobj;
>>>>    };
>>>>
>>>>    struct nvm_addr_format {
>>>> @@ -205,6 +206,7 @@ struct nvm_target {
>>>>    	struct list_head list;
>>>>    	struct nvm_tgt_type *type;
>>>>    	struct gendisk *disk;
>>>> +	struct kobject kobj;
>>>>    };
>>>>
>>>>    struct nvm_tgt_instance {
>>>> @@ -360,6 +362,8 @@ struct nvm_dev {
>>>>
>>>>    	struct mutex mlock;
>>>>    	spinlock_t lock;
>>>> +
>>>> +	struct kobject kobj;
>>>>    };
>>>>
>>>>    static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
>>>
>>> Never use "raw" kobjects in a driver for a device.  You just guaranteed
>>> that userspace tools will not see these devices or attributes, which
>>> implies you didn't really test this using libudev :(
>>>
>>> Please use real devices, attached to the real devices your disks already
>>> have in the tree.
>>>
>>> And are you sure you didn't just mess up your reference counting by
>>> now having the lifecycle of these structures be dictated by the kobject?
>>>
>>> thanks,
>>>
>>> greg k-h
>>>
>>
>> Hi Greg,
>>
>> Thanks for the feedback.
>>
>> lightnvm doesn't have anything to hook up with in the /dev/block/* until a
>> device is exposed through a target. A device goes into a staging area, and
>> then later is configured to expose a block device.
>>
>> In the case of NVMe device driver, the driver brings up a device, identifies
>> it as a lightnvm device, then calls nvm_register and registers the device.
>> It skips the registration as a block device.
>
> But you could register it with sysfs at this point in time, giving you
> a place in the device tree.  Which would be good.

As an example, when the device is identified by the nvme device driver, 
the nvm_register() registers the device (e.g. nvme0n1) in sysfs and 
places it here until further configuration:

   /sys/devices/virtual/misc/lightnvm/devices/nvme0n1

It would expose a representation of the lightnvm configuration.

Then when targets are added, we would put the target (e.g. tgt0) in

/sys/devices/virtual/misc/lightnvm/targets/tgt0

and that one could reference the device by

   /sys/devices/virtual/misc/lightnvm/targets/tgt0/devices/nvme0n1

     pointing to

   /sys/devices/virtual/misc/lightnvm/devices/nvme0n1

A target can span multiple devices (that's why the targets/*/devices 
link is there).

Does that make sense? In this case, the raw kobjects make sense to use, 
as we don't have anything to bind them to, other than the misc device we 
registered.
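
To make the proposed layout concrete, a minimal sketch using only existing
kobject/sysfs helpers. The function is hypothetical and not part of this
patch; a real implementation would create the "devices" directory once per
target and remove it again on teardown.

#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/lightnvm.h>

/* Sketch: create targets/<tgt>/devices/<dev> pointing back to
 * devices/<dev>, using the target->kobj and dev->kobj from this patch.
 */
static int nvm_sysfs_link_target_dev(struct nvm_target *target,
				     struct nvm_dev *dev)
{
	struct kobject *devices_dir;

	devices_dir = kobject_create_and_add("devices", &target->kobj);
	if (!devices_dir)
		return -ENOMEM;

	/* .../targets/tgt0/devices/nvme0n1 -> .../devices/nvme0n1 */
	return sysfs_create_link(devices_dir, &dev->kobj, dev->name);
}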

>
>> At the nvm_register point, the user can list the available devices through
>> an ioctl, and then choose a target to put on top. The target will then
>> expose it as a block device.
>
> Then move the device at this point in time.
>
>> This might not be the ideal way. I like your input on what would be the
>> proper way to expose such a device.
>
> See above.
>
> thanks,
>
> greg k-h
>
Greg KH May 6, 2016, 4:37 p.m. UTC | #4
On Wed, Apr 27, 2016 at 09:26:39PM +0200, Matias Bjørling wrote:
> On 04/27/2016 09:00 PM, Greg KH wrote:
> > On Wed, Apr 27, 2016 at 08:20:33PM +0200, Matias Bjørling wrote:
> > > 
> > > 
> > > On 04/27/2016 07:41 PM, Greg KH wrote:
> > > > On Wed, Apr 27, 2016 at 10:18:57AM -0700, Simon A. F. Lund wrote:
> > > > > --- a/include/linux/lightnvm.h
> > > > > +++ b/include/linux/lightnvm.h
> > > > > @@ -174,6 +174,7 @@ struct nvm_id_group {
> > > > >    	u16	cpar;
> > > > > 
> > > > >    	struct nvm_id_lp_tbl lptbl;
> > > > > +	struct kobject kobj;
> > > > >    };
> > > > > 
> > > > >    struct nvm_addr_format {
> > > > > @@ -205,6 +206,7 @@ struct nvm_target {
> > > > >    	struct list_head list;
> > > > >    	struct nvm_tgt_type *type;
> > > > >    	struct gendisk *disk;
> > > > > +	struct kobject kobj;
> > > > >    };
> > > > > 
> > > > >    struct nvm_tgt_instance {
> > > > > @@ -360,6 +362,8 @@ struct nvm_dev {
> > > > > 
> > > > >    	struct mutex mlock;
> > > > >    	spinlock_t lock;
> > > > > +
> > > > > +	struct kobject kobj;
> > > > >    };
> > > > > 
> > > > >    static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
> > > > 
> > > > Never use "raw" kobjects in a driver for a device.  You just guaranteed
> > > > that userspace tools will not see these devices or attributes, which
> > > > implies you didn't really test this using libudev :(
> > > > 
> > > > Please use real devices, attached to the real devices your disks already
> > > > have in the tree.
> > > > 
> > > > And are you sure you didn't just mess up your reference counting by
> > > > now having the lifecycle of these structures be dictated by the kobject?
> > > > 
> > > > thanks,
> > > > 
> > > > greg k-h
> > > > 
> > > 
> > > Hi Greg,
> > > 
> > > Thanks for the feedback.
> > > 
> > > lightnvm doesn't have anything to hook up with in the /dev/block/* until a
> > > device is exposed through a target. A device goes into a staging area, and
> > > then later is configured to expose a block device.
> > > 
> > > In the case of NVMe device driver, the driver brings up a device, identifies
> > > it as a lightnvm device, then calls nvm_register and registers the device.
> > > It skips the registration as a block device.
> > 
> > But you could register it with sysfs at this point in time, giving you
> > a place in the device tree.  Which would be good.
> 
> As an example, when the device is identified by the nvme device driver, the
> nvm_register() registers the device (e.g. nvme0n1) in sysfs and places it
> here until further configuration:
> 
>   /sys/devices/virtual/misc/lightnvm/devices/nvme0n1
> 
> It would expose a representation of the lightnvm configuration
> 
> Then when targets are added, we would put the target (e.g. tgt0) in
> 
> /sys/devices/virtual/misc/lightnvm/targets/tgt0
> 
> and that one could reference the device by
> 
>   /sys/devices/virtual/misc/lightnvm/targets/tgt0/devices/nvme0n1
> 
>     pointing to
> 
>   /sys/devices/virtual/misc/lightnvm/devices/nvme0n1
> 
> A target can span multiple devices (that's why the targets/*/devices link is
> there)
> 
> Does that make sense? and in this case, the raw kobjects make sense to use,
> as we don't have anything to bind them up to, other than the misc device we
> registered.

Maybe, but really, why not use a struct device?  You just prevented all
userspace tools from finding your devices (i.e. libudev), so you are
going to have a hard time with any tools that want to see the tree of
devices in the system.

And you also just lost the power management chain as well, the driver
model doesn't know to drop into a kobject, as it can't, so you might not
be working properly at all with that here for suspend/resume and the
like.

good luck,

greg k-h
Matias Bjorling May 6, 2016, 5:41 p.m. UTC | #5
On 05/06/2016 06:37 PM, Greg KH wrote:
> On Wed, Apr 27, 2016 at 09:26:39PM +0200, Matias Bjørling wrote:
>> On 04/27/2016 09:00 PM, Greg KH wrote:
>>> On Wed, Apr 27, 2016 at 08:20:33PM +0200, Matias Bjørling wrote:
>>>>
>>>>
>>>> On 04/27/2016 07:41 PM, Greg KH wrote:
>>>>> On Wed, Apr 27, 2016 at 10:18:57AM -0700, Simon A. F. Lund wrote:
>>>>>> --- a/include/linux/lightnvm.h
>>>>>> +++ b/include/linux/lightnvm.h
>>>>>> @@ -174,6 +174,7 @@ struct nvm_id_group {
>>>>>>     	u16	cpar;
>>>>>>
>>>>>>     	struct nvm_id_lp_tbl lptbl;
>>>>>> +	struct kobject kobj;
>>>>>>     };
>>>>>>
>>>>>>     struct nvm_addr_format {
>>>>>> @@ -205,6 +206,7 @@ struct nvm_target {
>>>>>>     	struct list_head list;
>>>>>>     	struct nvm_tgt_type *type;
>>>>>>     	struct gendisk *disk;
>>>>>> +	struct kobject kobj;
>>>>>>     };
>>>>>>
>>>>>>     struct nvm_tgt_instance {
>>>>>> @@ -360,6 +362,8 @@ struct nvm_dev {
>>>>>>
>>>>>>     	struct mutex mlock;
>>>>>>     	spinlock_t lock;
>>>>>> +
>>>>>> +	struct kobject kobj;
>>>>>>     };
>>>>>>
>>>>>>     static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
>>>>>
>>>>> Never use "raw" kobjects in a driver for a device.  You just guaranteed
>>>>> that userspace tools will not see these devices or attributes, which
>>>>> implies you didn't really test this using libudev :(
>>>>>
>>>>> Please use real devices, attached to the real devices your disks already
>>>>> have in the tree.
>>>>>
>>>>> And are you sure you didn't just mess up your reference counting by
>>>>> now having the lifecycle of these structures be dictated by the kobject?
>>>>>
>>>>> thanks,
>>>>>
>>>>> greg k-h
>>>>>
>>>>
>>>> Hi Greg,
>>>>
>>>> Thanks for the feedback.
>>>>
>>>> lightnvm doesn't have anything to hook up with in the /dev/block/* until a
>>>> device is exposed through a target. A device goes into a staging area, and
>>>> then later is configured to expose a block device.
>>>>
>>>> In the case of NVMe device driver, the driver brings up a device, identifies
>>>> it as a lightnvm device, then calls nvm_register and registers the device.
>>>> It skips the registration as a block device.
>>>
>>> But you could register it with sysfs at this point in time, giving you
>>> a place in the device tree.  Which would be good.
>>
>> As an example, when the device is identified by the nvme device driver, the
>> nvm_register() registers the device (e.g. nvme0n1) in sysfs and places it
>> here until further configuration:
>>
>>    /sys/devices/virtual/misc/lightnvm/devices/nvme0n1
>>
>> It would expose a representation of the lightnvm configuration
>>
>> Then when targets are added, we would put the target (e.g. tgt0) in
>>
>> /sys/devices/virtual/misc/lightnvm/targets/tgt0
>>
>> and that one could reference the device by
>>
>>    /sys/devices/virtual/misc/lightnvm/targets/tgt0/devices/nvme0n1
>>
>>      pointing to
>>
>>    /sys/devices/virtual/misc/lightnvm/devices/nvme0n1
>>
>> A target can span multiple devices (that's why the targets/*/devices link is
>> there)
>>
>> Does that make sense? and in this case, the raw kobjects make sense to use,
>> as we don't have anything to bind them up to, other than the misc device we
>> registered.
>
> Maybe, but really, why not use a struct device?  You just prevented all
> userspace tools from finding your devices (i.e. libudev), so you are
> going to have a hard time with any tools that want to see the tree of
> devices in the system.
>
> And you also just lost the power management chain as well, the driver
> model doesn't know to drop into a kobject, as it can't, so you might not
> be working properly at all with that here for suspend/resume and the
> like.
>
> good luck,
>
> greg k-h
>

Thanks, Greg. We are reworking it to use struct device. It requires a 
little more work from the nvme device driver, but otherwise it should be 
good. Thanks for your feedback.
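
As an illustration of the direction Greg suggests, a rough sketch of one
attribute done with standard driver-model helpers instead of a raw kobject.
nvm_class, the parent argument and the drvdata layout are assumptions made
for this sketch only; the eventual rework may look different.

#include <linux/device.h>
#include <linux/lightnvm.h>

static ssize_t version_show(struct device *dev, struct device_attribute *attr,
			    char *page)
{
	struct nvm_dev *ndev = dev_get_drvdata(dev);

	return scnprintf(page, PAGE_SIZE, "%u\n", ndev->identity.ver_id);
}
static DEVICE_ATTR_RO(version);

static struct attribute *nvm_dev_attrs[] = {
	&dev_attr_version.attr,
	/* ... the remaining attributes from this patch ... */
	NULL,
};
ATTRIBUTE_GROUPS(nvm_dev);

static struct class *nvm_class;	/* created elsewhere with class_create() */

static struct device *nvm_sysfs_add_dev(struct nvm_dev *ndev,
					struct device *parent)
{
	/* A real struct device: visible to libudev and parented under,
	 * e.g., the NVMe namespace, so it joins the PM/device tree. */
	return device_create_with_groups(nvm_class, parent, MKDEV(0, 0),
					 ndev, nvm_dev_groups, "%s",
					 ndev->name);
}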

Patch

diff --git a/Documentation/ABI/testing/sysfs-lightnvm b/Documentation/ABI/testing/sysfs-lightnvm
new file mode 100644
index 0000000..7cf9e78
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-lightnvm
@@ -0,0 +1,244 @@ 
+What:		/sys/.../lightnvm/devices/<device>/capabilities
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Device capabilities and feature support.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/device_mode
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Current device operating mode.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/media_manager
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Media manager type, e.g. "gennvm".
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/num_groups
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Number of configuration groups.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/ppa_format
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Physical Page Address format.
+		 Reported as hex.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/vendor_opcode
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Vendor NVM opcode command set.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/version
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Version identifier.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/channel_parallelism
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Number of parallel commands in-flight within a channel.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/erase_max
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Maximum page erase time (in ns).
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/erase_typ
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Typical page erase time (in ns).
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/flash_media_type
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 0: Single bit level cell flash (SLC)
+		 1: Two bit level cell flash (MLC)
+		 2: Three bit level cell flash (TLC)
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/media_capabilities
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Media and controller capabilities for flash memories.
+		 Represented as a bit-string, reported as hex.
+
+		 Bit	Feature
+		 ===============================
+		 0:	SLC mode
+		 1:	Command suspension
+		 2:	Scramble ON/OFF
+		 3:	Encryption
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/media_type
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 The rest of the <grp> attributes are determined by the value
+		 of the media_type.
+
+		 0: NAND Flash Memory
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/multiplane
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Support for multi-plane operations.
+		 Represented as a bit-string, reported as hex.
+
+		 Bit	Feature
+		 ===============================
+		 0:	Single plane read
+		 1:	Dual plane read
+		 2:	Quad plane read
+		 8:	Single plane program
+		 9:	Dual plane program
+		 10:	Quad plane program
+		 16:	Single plane erase
+		 17:	Dual plane erase
+		 18:	Quad plane erase
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/num_blocks
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Number of flash blocks per plane.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/num_channels
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Number of channels in controller.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/num_luns
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Number of Logical Unit Numbers (LUNs) per channel.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/num_pages
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Number of flash pages per block.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/num_planes
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Number of flash planes per LUN.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/page_size
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Number of bytes per flash page.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/prog_max
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Maximum page program time (in ns).
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/prog_typ
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Typical page program time (in ns).
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/read_max
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Maximum page read time (in ns).
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/read_typ
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Typical page read time (in ns).
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/sector_oob_size
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Per-sector metadata size (in bytes).
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/devices/<device>/<grp>/sector_size
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Controller-defined minimum data unit protected by ECC
+		 (in bytes). For example, 4096 bytes.
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
+What:		/sys/.../lightnvm/targets/<tgt>/type
+Date:		March 2016
+KernelVersion:	4.7
+Contact:	"Simon A. F. Lund" <slund@cnexlabs.com>
+Description:
+		 Target type, e.g. "rrpc" or "pblk".
+Users:		lnvm - https://github.com/OpenChannelSSD/lnvm
+
diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile
index a7a0a22..1f6b652 100644
--- a/drivers/lightnvm/Makefile
+++ b/drivers/lightnvm/Makefile
@@ -2,6 +2,6 @@ 
 # Makefile for Open-Channel SSDs.
 #
 
-obj-$(CONFIG_NVM)		:= core.o sysblk.o
+obj-$(CONFIG_NVM)		:= core.o sysblk.o sysfs.o
 obj-$(CONFIG_NVM_GENNVM) 	+= gennvm.o
 obj-$(CONFIG_NVM_RRPC)		+= rrpc.o
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 0296223..8a7f079 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -30,6 +30,8 @@ 
 #include <linux/sched/sysctl.h>
 #include <uapi/linux/lightnvm.h>
 
+#include "sysfs.h"
+
 static LIST_HEAD(nvm_tgt_types);
 static LIST_HEAD(nvm_mgrs);
 static LIST_HEAD(nvm_devices);
@@ -684,6 +686,10 @@  int nvm_register(struct request_queue *q, char *disk_name,
 		}
 	}
 
+	ret = nvm_sysfs_register_dev(dev);
+	if (ret)
+		goto err_ppalist;
+
 	if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
 		ret = nvm_get_sysblock(dev, &dev->sb);
 		if (!ret)
@@ -700,6 +706,9 @@  int nvm_register(struct request_queue *q, char *disk_name,
 	up_write(&nvm_lock);
 
 	return 0;
+err_ppalist:
+	if (dev->ppalist_pool)
+		dev->ops->destroy_dma_pool(dev->ppalist_pool);
 err_init:
 	kfree(dev->lun_map);
 	kfree(dev);
@@ -724,7 +733,8 @@  void nvm_unregister(char *disk_name)
 	up_write(&nvm_lock);
 
 	nvm_exit(dev);
-	kfree(dev);
+
+	nvm_sysfs_unregister_dev(dev);
 }
 EXPORT_SYMBOL(nvm_unregister);
 
@@ -763,7 +773,7 @@  static int nvm_create_target(struct nvm_dev *dev,
 	}
 	up_write(&nvm_lock);
 
-	t = kmalloc(sizeof(struct nvm_target), GFP_KERNEL);
+	t = kzalloc(sizeof(struct nvm_target), GFP_KERNEL);
 	if (!t)
 		return -ENOMEM;
 
@@ -792,12 +802,15 @@  static int nvm_create_target(struct nvm_dev *dev,
 
 	blk_queue_max_hw_sectors(tqueue, 8 * dev->ops->max_phys_sect);
 
-	set_capacity(tdisk, tt->capacity(targetdata));
-	add_disk(tdisk);
-
 	t->type = tt;
 	t->disk = tdisk;
 
+	if (nvm_sysfs_register_target(t))
+		goto err_init;
+
+	set_capacity(tdisk, tt->capacity(targetdata));
+	add_disk(tdisk);
+
 	down_write(&nvm_lock);
 	list_add_tail(&t->list, &nvm_targets);
 	up_write(&nvm_lock);
@@ -829,7 +842,8 @@  static void nvm_remove_target(struct nvm_target *t)
 	put_disk(tdisk);
 
 	list_del(&t->list);
-	kfree(t);
+
+	nvm_sysfs_unregister_target(t);
 }
 
 static int __nvm_configure_create(struct nvm_ioctl_create *create)
@@ -1276,11 +1290,18 @@  static int __init nvm_mod_init(void)
 	if (ret)
 		pr_err("nvm: misc_register failed for control device");
 
+	ret = nvm_sysfs_register(&_nvm_misc);
+	if (ret) {
+		pr_err("nvm: sysfs registration failed.\n");
+		misc_deregister(&_nvm_misc);
+	}
+
 	return ret;
 }
 
 static void __exit nvm_mod_exit(void)
 {
+	nvm_sysfs_unregister(&_nvm_misc);
 	misc_deregister(&_nvm_misc);
 }
 
diff --git a/drivers/lightnvm/sysfs.c b/drivers/lightnvm/sysfs.c
new file mode 100644
index 0000000..725f8df
--- /dev/null
+++ b/drivers/lightnvm/sysfs.c
@@ -0,0 +1,418 @@ 
+#include <linux/kernel.h>
+#include <linux/lightnvm.h>
+#include <linux/miscdevice.h>
+#include <linux/kobject.h>
+
+#include "sysfs.h"
+
+static struct kset *devices;
+static struct kset *targets;
+
+/*
+ * Functions and data structures for LightNVM targets in sysfs.
+ * This file contains the show-functions, release-functions, default_attrs,
+ * sysfs_register* function, and ktypes.
+ */
+
+#define NVM_TARGET_ATTR_RO(_name)					\
+	static struct attribute nvm_target_##_name##_attr = {		\
+	.name = __stringify(_name),					\
+	.mode = S_IRUGO							\
+	}
+
+#define NVM_TARGET_ATTR_LIST(_name) (&nvm_target_##_name##_attr)
+
+NVM_TARGET_ATTR_RO(type);
+
+static struct attribute *nvm_target_default_attrs[] = {
+	NVM_TARGET_ATTR_LIST(type),
+	NULL,
+};
+
+static ssize_t nvm_target_attr_show(struct kobject *kobj,
+				struct attribute *attr,
+				char *page)
+{
+	struct nvm_target *target = container_of(kobj, struct nvm_target, kobj);
+
+	if (strcmp(attr->name, "type") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%s\n",
+				 target->type->name);
+	} else {
+		return scnprintf(page, PAGE_SIZE,
+			"Unhandled attr(%s) in `nvm_target_attr_show`\n",
+			attr->name);
+	}
+}
+
+static const struct sysfs_ops target_sysfs_ops = {
+	.show = nvm_target_attr_show,
+};
+
+static void nvm_target_release(struct kobject *kobj)
+{
+	struct nvm_target *tgt = container_of(kobj, struct nvm_target, kobj);
+
+	pr_debug("nvm/sysfs: `nvm_target_release`\n");
+
+	kfree(tgt);
+}
+
+static struct kobj_type nvm_target_ktype = {
+	.sysfs_ops	= &target_sysfs_ops,
+	.default_attrs	= nvm_target_default_attrs,
+	.release	= nvm_target_release
+};
+
+void nvm_sysfs_unregister_target(struct nvm_target *target)
+{
+	kobject_del(&target->kobj);
+	kobject_put(&target->kobj);
+}
+
+int nvm_sysfs_register_target(struct nvm_target *target)
+{
+	int ret;
+
+	target->kobj.kset = targets;
+	ret = kobject_init_and_add(&target->kobj, &nvm_target_ktype, NULL, "%s",
+				   target->disk->disk_name);
+	if (ret < 0) {
+		pr_err("nvm/sysfs: `_register_target` failed.\n");
+		kobject_put(&target->kobj);
+		return ret;
+	}
+
+	kobject_uevent(&target->kobj, KOBJ_ADD);
+
+	return 0;
+}
+
+/*
+ * Functions and data structures for exposing
+ * group-information of LightNVM enabled devices.
+ *
+ * NOTE: these are internal to sysfs.c and used by `nvm_sysfs_[un]register_dev`.
+ */
+
+static void nvm_grp_release(struct kobject *kobj)
+{
+	pr_debug("nvm/sysfs: called `nvm_grp_release`.\n");
+
+	/* This does nothing since `nvm_id_group` information is embedded inside
+	 * `nvm_dev`. Management of `nvm_id_group` is therefore handled by the
+	 * release of `nvm_dev_release`.
+	 */
+}
+
+#define NVM_GRP_ATTR_RO(_name)						\
+	static struct attribute nvm_grp_##_name##_attr = {		\
+	.name = __stringify(_name),					\
+	.mode = S_IRUGO							\
+	}
+
+#define NVM_GRP_ATTR_LIST(_name) (&nvm_grp_##_name##_attr)
+
+NVM_GRP_ATTR_RO(media_type);
+NVM_GRP_ATTR_RO(flash_media_type);
+NVM_GRP_ATTR_RO(num_channels);
+NVM_GRP_ATTR_RO(num_luns);
+NVM_GRP_ATTR_RO(num_planes);
+NVM_GRP_ATTR_RO(num_blocks);
+NVM_GRP_ATTR_RO(num_pages);
+NVM_GRP_ATTR_RO(page_size);
+NVM_GRP_ATTR_RO(sector_size);
+NVM_GRP_ATTR_RO(sector_oob_size);
+NVM_GRP_ATTR_RO(read_typ);
+NVM_GRP_ATTR_RO(read_max);
+NVM_GRP_ATTR_RO(prog_typ);
+NVM_GRP_ATTR_RO(prog_max);
+NVM_GRP_ATTR_RO(erase_typ);
+NVM_GRP_ATTR_RO(erase_max);
+NVM_GRP_ATTR_RO(multiplane);
+NVM_GRP_ATTR_RO(media_capabilities);
+NVM_GRP_ATTR_RO(channel_parallelism);
+
+static struct attribute *nvm_grp_default_attrs[] = {
+	NVM_GRP_ATTR_LIST(media_type),
+	NVM_GRP_ATTR_LIST(flash_media_type),
+	NVM_GRP_ATTR_LIST(num_channels),
+	NVM_GRP_ATTR_LIST(num_luns),
+	NVM_GRP_ATTR_LIST(num_planes),
+	NVM_GRP_ATTR_LIST(num_blocks),
+	NVM_GRP_ATTR_LIST(num_pages),
+	NVM_GRP_ATTR_LIST(page_size),
+	NVM_GRP_ATTR_LIST(sector_size),
+	NVM_GRP_ATTR_LIST(sector_oob_size),
+	NVM_GRP_ATTR_LIST(read_typ),
+	NVM_GRP_ATTR_LIST(read_max),
+	NVM_GRP_ATTR_LIST(prog_typ),
+	NVM_GRP_ATTR_LIST(prog_max),
+	NVM_GRP_ATTR_LIST(erase_typ),
+	NVM_GRP_ATTR_LIST(erase_max),
+	NVM_GRP_ATTR_LIST(multiplane),
+	NVM_GRP_ATTR_LIST(media_capabilities),
+	NVM_GRP_ATTR_LIST(channel_parallelism),
+	NULL,
+};
+
+static ssize_t nvm_grp_attr_show(struct kobject *kobj,
+			     struct attribute *attr,
+			     char *page)
+{
+	struct nvm_id_group *grp = container_of(kobj, struct nvm_id_group,
+						kobj);
+
+	if (strcmp(attr->name, "media_type") == 0) {		/* u8 */
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype);
+	} else if (strcmp(attr->name, "flash_media_type") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype);
+	} else if (strcmp(attr->name, "num_channels") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch);
+	} else if (strcmp(attr->name, "num_luns") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun);
+	} else if (strcmp(attr->name, "num_planes") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln);
+	} else if (strcmp(attr->name, "num_blocks") == 0) {	/* u16 */
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk);
+	} else if (strcmp(attr->name, "num_pages") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg);
+	} else if (strcmp(attr->name, "page_size") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz);
+	} else if (strcmp(attr->name, "sector_size") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs);
+	} else if (strcmp(attr->name, "sector_oob_size") == 0) {/* u32 */
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos);
+	} else if (strcmp(attr->name, "read_typ") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt);
+	} else if (strcmp(attr->name, "read_max") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm);
+	} else if (strcmp(attr->name, "prog_typ") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt);
+	} else if (strcmp(attr->name, "prog_max") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm);
+	} else if (strcmp(attr->name, "erase_typ") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet);
+	} else if (strcmp(attr->name, "erase_max") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem);
+	} else if (strcmp(attr->name, "multiplane") == 0) {
+		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos);
+	} else if (strcmp(attr->name, "media_capabilities") == 0) {
+		return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap);
+	} else if (strcmp(attr->name, "channel_parallelism") == 0) {/* u16 */
+		return scnprintf(page, PAGE_SIZE, "%u\n", grp->cpar);
+	} else {
+		return scnprintf(page, PAGE_SIZE,
+				 "Unhandled attr(%s) in `nvm_grp_attr_show`\n",
+				 attr->name);
+	}
+}
+
+static const struct sysfs_ops nvm_grp_sysfs_ops = {
+	.show	= nvm_grp_attr_show,
+};
+
+static struct kobj_type nvm_grp_ktype = {
+	.sysfs_ops	= &nvm_grp_sysfs_ops,
+	.default_attrs	= nvm_grp_default_attrs,
+	.release	= nvm_grp_release
+};
+
+void nvm_sysfs_unregister_grps(struct nvm_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < dev->identity.cgrps; i++) {
+		kobject_del(&dev->identity.groups[i].kobj);
+		kobject_put(&dev->identity.groups[i].kobj);
+		kobject_put(&dev->kobj);
+	}
+}
+
+static int nvm_sysfs_register_grps(struct nvm_dev *dev)
+{
+	int i, ret;
+
+	for (i = 0; i < dev->identity.cgrps; i++) {
+		ret = kobject_init_and_add(&dev->identity.groups[i].kobj,
+					   &nvm_grp_ktype,
+					   kobject_get(&dev->kobj),
+					   "grp%u", i);
+		if (ret < 0) {
+			pr_err("nvm/sysfs: `_register_grps` failed(%d)\n", ret);
+			goto grps_error;
+		}
+
+		kobject_uevent(&dev->identity.groups[i].kobj, KOBJ_ADD);
+	}
+
+	return 0;
+
+grps_error:
+	kobject_put(&dev->identity.groups[i].kobj);	/* The failed grp*/
+	kobject_put(&dev->kobj);
+
+	for (i = i - 1; i >= 0; i--) {			/* Successful grps */
+		kobject_del(&dev->identity.groups[i].kobj);
+		kobject_put(&dev->identity.groups[i].kobj);
+		kobject_put(&dev->kobj);
+	}
+
+	return ret;
+}
+
+/*
+ * Functions and data structures for exposing LightNVM enabled devices.
+ */
+
+#define NVM_DEV_ATTR_RO(_name)						\
+	static struct attribute nvm_dev_##_name##_attr = {		\
+	.name = __stringify(_name),					\
+	.mode = S_IRUGO							\
+	}
+
+#define NVM_DEV_ATTR_LIST(_name) (&nvm_dev_##_name##_attr)
+
+NVM_DEV_ATTR_RO(version);
+NVM_DEV_ATTR_RO(vendor_opcode);
+NVM_DEV_ATTR_RO(num_groups);
+NVM_DEV_ATTR_RO(capabilities);
+NVM_DEV_ATTR_RO(device_mode);
+NVM_DEV_ATTR_RO(ppa_format);
+NVM_DEV_ATTR_RO(media_manager);
+
+static struct attribute *nvm_dev_default_attrs[] = {
+	NVM_DEV_ATTR_LIST(version),
+	NVM_DEV_ATTR_LIST(vendor_opcode),
+	NVM_DEV_ATTR_LIST(num_groups),
+	NVM_DEV_ATTR_LIST(capabilities),
+	NVM_DEV_ATTR_LIST(device_mode),
+	NVM_DEV_ATTR_LIST(ppa_format),
+	NVM_DEV_ATTR_LIST(media_manager),
+	NULL,
+};
+
+static ssize_t nvm_dev_attr_show(struct kobject *kobj, struct attribute *attr,
+				char *page)
+{
+	struct nvm_dev *dev = container_of(kobj, struct nvm_dev, kobj);
+	struct nvm_id *id = &dev->identity;
+
+	if (strcmp(attr->name, "version") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id);
+	} else if (strcmp(attr->name, "vendor_opcode") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt);
+	} else if (strcmp(attr->name, "num_groups") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->cgrps);
+	} else if (strcmp(attr->name, "capabilities") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->cap);
+	} else if (strcmp(attr->name, "device_mode") == 0) {
+		return scnprintf(page, PAGE_SIZE, "%u\n", id->dom);
+	} else if (strcmp(attr->name, "media_manager") == 0) {
+		if (!dev->mt)
+			return scnprintf(page, PAGE_SIZE, "%s\n", "none");
+		return scnprintf(page, PAGE_SIZE, "%s\n", dev->mt->name);
+	} else if (strcmp(attr->name, "ppa_format") == 0) {
+		return scnprintf(page, PAGE_SIZE,
+			"0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n",
+			id->ppaf.ch_offset, id->ppaf.ch_len,
+			id->ppaf.lun_offset, id->ppaf.lun_len,
+			id->ppaf.pln_offset, id->ppaf.pln_len,
+			id->ppaf.blk_offset, id->ppaf.blk_len,
+			id->ppaf.pg_offset, id->ppaf.pg_len,
+			id->ppaf.sect_offset, id->ppaf.sect_len);
+	} else {
+		return scnprintf(page,
+				 PAGE_SIZE,
+				 "Unhandled attr(%s) in `nvm_dev_attr_show`\n",
+				 attr->name);
+	}
+}
+
+static const struct sysfs_ops nvm_dev_sysfs_ops = {
+	.show	= nvm_dev_attr_show,
+};
+
+static void nvm_dev_release(struct kobject *kobj)
+{
+	struct nvm_dev *dev = container_of(kobj, struct nvm_dev, kobj);
+
+	pr_debug("nvm/sysfs: `nvm_dev_release`\n");
+
+	kfree(dev);
+}
+
+static struct kobj_type nvm_dev_ktype = {
+	.sysfs_ops	= &nvm_dev_sysfs_ops,
+	.default_attrs	= nvm_dev_default_attrs,
+	.release	= nvm_dev_release
+};
+
+void nvm_sysfs_unregister_dev(struct nvm_dev *dev)
+{
+	nvm_sysfs_unregister_grps(dev);
+
+	kobject_del(&dev->kobj);
+	kobject_put(&dev->kobj);
+}
+
+int nvm_sysfs_register_dev(struct nvm_dev *dev)
+{
+	int ret;
+
+	dev->kobj.kset = devices;
+	ret = kobject_init_and_add(&dev->kobj, &nvm_dev_ktype, NULL, "%s",
+				   dev->name);
+	if (ret < 0) {
+		pr_err("nvm/sysfs: `_register_dev` failed(%d).\n", ret);
+		kobject_put(&dev->kobj);
+		return ret;
+	}
+	kobject_uevent(&dev->kobj, KOBJ_ADD);
+
+	ret = nvm_sysfs_register_grps(dev);
+	if (ret < 0) {
+		pr_err("nvm/sysfs: `_register_dev` rolling back.\n");
+
+		kobject_del(&dev->kobj);
+		kobject_put(&dev->kobj);
+
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * Functions for exposing LightNVM devices and targets in sysfs.
+ *
+ * They will reside as children of the given `miscdevice`.
+ */
+
+int nvm_sysfs_register(struct miscdevice *miscdev)
+{
+	devices = kset_create_and_add("devices", NULL,
+			kobject_get(&miscdev->this_device->kobj));
+	if (!devices)
+		goto devices_err;
+
+	targets = kset_create_and_add("targets", NULL,
+			kobject_get(&miscdev->this_device->kobj));
+	if (!targets)
+		goto targets_err;
+
+	return 0;
+
+targets_err:
+	kobject_put(&miscdev->this_device->kobj);
+	kset_unregister(devices);
+devices_err:
+	kobject_put(&miscdev->this_device->kobj);
+	return -ENOMEM;
+}
+
+void nvm_sysfs_unregister(struct miscdevice *miscdev)
+{
+	kset_unregister(targets);
+	kset_unregister(devices);
+}
diff --git a/drivers/lightnvm/sysfs.h b/drivers/lightnvm/sysfs.h
new file mode 100644
index 0000000..81e11fe
--- /dev/null
+++ b/drivers/lightnvm/sysfs.h
@@ -0,0 +1,18 @@ 
+/*
+ * Functions related to LightNVM sysfs handling.
+ */
+#ifndef NVM_SYSFS_H_
+#define NVM_SYSFS_H_
+
+#include <linux/lightnvm.h>
+
+int nvm_sysfs_register_target(struct nvm_target *);
+void nvm_sysfs_unregister_target(struct nvm_target *);
+
+int nvm_sysfs_register_dev(struct nvm_dev *);
+void nvm_sysfs_unregister_dev(struct nvm_dev *);
+
+int nvm_sysfs_register(struct miscdevice *);
+void nvm_sysfs_unregister(struct miscdevice *);
+
+#endif /* NVM_SYSFS_H_ */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 5eabdba..bbe3b72 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -174,6 +174,7 @@  struct nvm_id_group {
 	u16	cpar;
 
 	struct nvm_id_lp_tbl lptbl;
+	struct kobject kobj;
 };
 
 struct nvm_addr_format {
@@ -205,6 +206,7 @@  struct nvm_target {
 	struct list_head list;
 	struct nvm_tgt_type *type;
 	struct gendisk *disk;
+	struct kobject kobj;
 };
 
 struct nvm_tgt_instance {
@@ -360,6 +362,8 @@  struct nvm_dev {
 
 	struct mutex mlock;
 	spinlock_t lock;
+
+	struct kobject kobj;
 };
 
 static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,