diff mbox series

qla2xxx: Add SysFS hook for FC-NVMe autoconnect

Message ID 20181112214017.11218-1-himanshu.madhani@cavium.com (mailing list archive)
State Deferred
Headers show
Series qla2xxx: Add SysFS hook for FC-NVMe autoconnect | expand

Commit Message

Madhani, Himanshu Nov. 12, 2018, 9:40 p.m. UTC
This patch adds a SysFS hook that a systemd service can use to kick
off the autoconnect command at boot time.

The output of the SysFS hook provides the host-traddr/traddr values that the
NVMe CLI will use to kick off discovery at boot time.

Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
---
Hi Martin, 

This patch provides a mechanism for the qla2xxx driver's boot-time scripts
to autodiscover and autoconnect NVMe LUNs.

Please apply this to 4.21/scsi-queue at your earliest convenience. 

Thanks,
Himanshu
---
 drivers/scsi/qla2xxx/qla_attr.c | 122 +++++++++++++++++++++++++++++++++++++++-
 1 file changed, 121 insertions(+), 1 deletion(-)
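
For illustration only, here is a minimal user-space sketch of how such a boot-time
service might consume the proposed attribute. The helper, the sysfs path and the
exact nvme-cli invocation are assumptions derived from the commit message and the
"NVMECLI:" line format in the patch below; none of this is part of the submission.

#include <stdio.h>

int main(int argc, char **argv)
{
	/* Assumed location of the attribute added by this patch. */
	const char *path = argc > 1 ? argv[1] :
	    "/sys/class/scsi_host/host0/nvme_connect_str";
	char line[512], host_traddr[128], traddr[128];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}

	while (fgets(line, sizeof(line), f)) {
		/* Only the "NVMECLI:" lines carry the connect parameters. */
		if (sscanf(line, "NVMECLI: host-traddr=%127s traddr=%127s",
		    host_traddr, traddr) == 2)
			printf("nvme connect-all -t fc -w %s -a %s\n",
			    host_traddr, traddr);
	}

	fclose(f);
	return 0;
}

A systemd unit would run something like this once per SCSI host after the driver
has loaded and pass the printed commands to nvme-cli.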

Comments

Bart Van Assche Nov. 12, 2018, 10:06 p.m. UTC | #1
On Mon, 2018-11-12 at 13:40 -0800, Himanshu Madhani wrote:
> diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
> index 678aff5ca947..323a4aa35f16 100644
> --- a/drivers/scsi/qla2xxx/qla_attr.c
> +++ b/drivers/scsi/qla2xxx/qla_attr.c
> @@ -1665,6 +1665,125 @@ qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
>  	    ha->max_speed_sup ? "32Gps" : "16Gps");
>  }
>  
> +static ssize_t
> +qla27xx_nvme_connect_str_show(struct device *dev, struct device_attribute *attr,
> +    char *buf)
> +{
> +	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
> +	struct nvme_fc_remote_port *rport;
> +	struct nvme_fc_local_port *lport;
> +	struct qla_hw_data *ha = vha->hw;
> +	struct qla_nvme_rport *qla_rport, *trport;
> +	fc_port_t *fcport;
> +	char temp[150] = {0};
> +	char *rportstate = "";
> +
> +	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
> +		return scnprintf(buf, PAGE_SIZE, "\n");
> +
> +	if (!vha->flags.nvme_enabled)
> +		return scnprintf(buf, PAGE_SIZE, "%s\n",
> +		    "FC-NVMe is not enabled");
> +
> +	list_for_each_entry(fcport, &vha->vp_fcports, list) {
> +		if (!fcport) {
> +			scnprintf(buf, PAGE_SIZE, "No FC host\n");
> +			return strlen(buf);
> +		}
> +
> +		if (!vha->nvme_local_port) {
> +			scnprintf(buf, PAGE_SIZE,
> +			    "FC-NVMe Initiator on 0x%16llx not registered.\n",
> +			    wwn_to_u64(fcport->port_name));
> +			return strlen(buf);
> +		}
> +
> +		list_for_each_entry_safe(qla_rport, trport,
> +		    &vha->nvme_rport_list, list) {
> +			if (qla_rport->fcport == fcport) {
> +				rport = fcport->nvme_remote_port;
> +
> +				lport = vha->nvme_local_port;
> +
> +				scnprintf(temp, sizeof(temp),
> +				    "FC-NVMe LPORT: host%ld nn-0x%16llx:pn-0x%16llx port_id %06x %s\n",
> +				    vha->host_no, lport->node_name,
> +				    lport->port_name, lport->port_id, "ONLINE");
> +
> +				if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE)
> +					goto done;
> +
> +				scnprintf(temp, sizeof(temp),
> +				    "FC-NVMe RPORT: host%ld nn-0x%llx:pn-0x%llx port_id %06x ",
> +				    vha->host_no, rport->node_name,
> +				    rport->port_name, rport->port_id);
> +
> +				/* Find out Rport State */
> +				if (rport->port_state & FC_OBJSTATE_ONLINE)
> +					rportstate = "ONLINE";
> +
> +				if (rport->port_state & FC_OBJSTATE_UNKNOWN)
> +					rportstate = "UNKNOWN";
> +
> +				if (rport->port_state & ~(FC_OBJSTATE_ONLINE |
> +				    FC_OBJSTATE_UNKNOWN))
> +					rportstate = "UNSUPPORTED";
> +
> +				if (strlcat(buf, temp, PAGE_SIZE) >=
> +				    PAGE_SIZE)
> +					goto done;
> +
> +				if (rport->port_role &
> +				    (FC_PORT_ROLE_NVME_INITIATOR |
> +				      FC_PORT_ROLE_NVME_TARGET |
> +				      FC_PORT_ROLE_NVME_DISCOVERY)) {
> +					if (rport->port_role &
> +					    FC_PORT_ROLE_NVME_INITIATOR)
> +						if (strlcat(buf, "INITIATOR ",
> +						    PAGE_SIZE) >= PAGE_SIZE)
> +							goto done;
> +
> +					if (rport->port_role &
> +					    FC_PORT_ROLE_NVME_TARGET)
> +						if (strlcat(buf, "TARGET ",
> +						    PAGE_SIZE) >= PAGE_SIZE)
> +							goto done;
> +
> +					if (rport->port_role &
> +					    FC_PORT_ROLE_NVME_DISCOVERY)
> +						if (strlcat(buf, "DISCOVERY ",
> +						    PAGE_SIZE) >= PAGE_SIZE)
> +							goto done;
> +				} else {
> +					if (strlcat(buf, "UNKNOWN_ROLE ",
> +					    PAGE_SIZE) >= PAGE_SIZE)
> +						goto done;
> +				}
> +				scnprintf(temp, sizeof(temp), "%s\n", rportstate);
> +
> +				if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE)
> +					goto done;
> +
> +				scnprintf(temp, sizeof(temp),
> +				    "NVMECLI: host-traddr=nn-0x%16llx:pn-0x%16llx traddr=nn-0x%16llx:pn-0x%16llx\n",
> +				    lport->node_name, lport->port_name,
> +				    rport->node_name, rport->port_name);
> +
> +				if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE)
> +					goto done;
> +			}
> +		}
> +	}
> +
> +	return strlen(buf);
> +
> +done:
> +	ql_log(ql_log_warn, vha, 0xffff,
> +	    "NVME connect string buffer size 0x%lx exceeds 0x%lx\n",
> +	    sizeof(*buf), PAGE_SIZE);
> +	return strlen(buf);
> +}

Hi Himanshu,

Are you familiar with the "one value per file" rule for sysfs? A quote from
Documentation/filesystems/sysfs.txt: "Attributes should be ASCII text files,
preferably with only one value per file."

Thanks,

Bart.
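
To make that review point concrete, here is a hypothetical sketch of what a
one-value-per-file layout could look like for this driver; the attribute name and
helper below do not exist in qla2xxx and are shown only to illustrate the shape:

static ssize_t
qla_nvme_host_traddr_show(struct device *dev, struct device_attribute *attr,
    char *buf)
{
	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));

	if (!vha->nvme_local_port)
		return scnprintf(buf, PAGE_SIZE, "\n");

	/* Exactly one value: the local port address in nvme-cli syntax. */
	return scnprintf(buf, PAGE_SIZE, "nn-0x%016llx:pn-0x%016llx\n",
	    vha->nvme_local_port->node_name,
	    vha->nvme_local_port->port_name);
}
static DEVICE_ATTR(nvme_host_traddr, 0444, qla_nvme_host_traddr_show, NULL);

Each remaining value (remote traddr, port state, port roles) would get its own
attribute in the same style, so user space reads exactly the field it needs.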
Madhani, Himanshu Nov. 13, 2018, 1:02 a.m. UTC | #2
Hi Bart, 

> On Nov 12, 2018, at 2:06 PM, Bart Van Assche <bvanassche@acm.org> wrote:
> 
> On Mon, 2018-11-12 at 13:40 -0800, Himanshu Madhani wrote:
>> [patch hunk snipped]
> 
> Hi Himanshu,
> 
> Are you familiar with the "one value per file" rule for sysfs? A quote from
> Documentation/filesystems/sysfs.txt: "Attributes should be ASCII text files,
> preferably with only one value per file."
> 
> Thanks,
> 
> Bart.
> 

Yes. Thanks for pointing out documentation related to SysFS. 

The main reason this was done was to allow the NVMe CLI to take the string with
host-traddr/traddr in one shot instead of parsing multiple SysFS hooks.

I see other drivers also use similar information populated for NVMe Connection at boot time.

Thanks,
- Himanshu
Bart Van Assche Nov. 13, 2018, 2:23 p.m. UTC | #3
On Tue, 2018-11-13 at 01:02 +0000, Madhani, Himanshu wrote:
> I see other drivers also use similar information populated for NVMe
> Connection at boot time.

Hi Himanshu,

Which other drivers are you referring to?

Thanks,

Bart.
Madhani, Himanshu Nov. 13, 2018, 5:38 p.m. UTC | #4
> On Nov 13, 2018, at 6:23 AM, Bart Van Assche <bvanassche@acm.org> wrote:
> 
> On Tue, 2018-11-13 at 01:02 +0000, Madhani, Himanshu wrote:
>> I see other drivers also use similar information populated for NVMe
>> Connection at boot time.
> 
> Hi Himanshu,
> 
> Which other drivers are you referring to?
> 
> Thanks,
> 
> Bart.

https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/scsi/lpfc/lpfc_attr.c

Thanks,
- Himanshu
Bart Van Assche Nov. 13, 2018, 5:49 p.m. UTC | #5
On Tue, 2018-11-13 at 17:38 +0000, Madhani, Himanshu wrote:
> On Nov 13, 2018, at 6:23 AM, Bart Van Assche <bvanassche@acm.org> wrote:
> > On Tue, 2018-11-13 at 01:02 +0000, Madhani, Himanshu wrote:
> > > I see other drivers also use similar information populated for NVMe
> > > Connection at boot time.
> > 
> > Hi Himanshu,
> > 
> > Which other drivers are you referring to?
> > 
> > Thanks,
> > 
> > Bart.
> 
> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/scsi/lpfc/lpfc_attr.c

Hello James,

Please either move the information exported by lpfc_nvme_info_show() into
debugfs or split the information exported by that function into multiple
sysfs attributes. Otherwise you will get flamed by Greg KH as soon as he
encounters that code because of not following the rules explained in
Documentation/filesystems/sysfs.txt.

Bart.
Ewan Milne Nov. 14, 2018, 5:13 p.m. UTC | #6
On Tue, 2018-11-13 at 09:49 -0800, Bart Van Assche wrote:
> On Tue, 2018-11-13 at 17:38 +0000, Madhani, Himanshu wrote:
> > On Nov 13, 2018, at 6:23 AM, Bart Van Assche <bvanassche@acm.org> wrote:
> > > On Tue, 2018-11-13 at 01:02 +0000, Madhani, Himanshu wrote:
> > > > I see other drivers also use similar information populated for NVMe
> > > > Connection at boot time.
> > > 
> > > Hi Himanshu,
> > > 
> > > Which other drivers are you referring to?
> > > 
> > > Thanks,
> > > 
> > > Bart.
> > 
> > https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/scsi/lpfc/lpfc_attr.c
> 
> Hello James,
> 
> Please either move the information exported by lpfc_nvme_info_show() into
> debugfs or split the information exported by that function into multiple
> sysfs attributes. Otherwise you will get flamed by Greg KH as soon as he
> encounters that code because of not following the rules explained in
> Documentation/filesystems/sysfs.txt.
> 
> Bart.

Better yet, is there some way we could get at least the connection
information needed for discovery moved into the NVMe/FC transport
layer so that we don't need to have separate implementations that
have to be parsed?

-Ewan
Hannes Reinecke Nov. 15, 2018, 12:52 p.m. UTC | #7
On 11/14/18 6:13 PM, Ewan D. Milne wrote:
> On Tue, 2018-11-13 at 09:49 -0800, Bart Van Assche wrote:
>> On Tue, 2018-11-13 at 17:38 +0000, Madhani, Himanshu wrote:
>>> On Nov 13, 2018, at 6:23 AM, Bart Van Assche <bvanassche@acm.org> wrote:
>>>> On Tue, 2018-11-13 at 01:02 +0000, Madhani, Himanshu wrote:
>>>>> I see other drivers also use similar information populated for NVMe
>>>>> Connection at boot time.
>>>>
>>>> Hi Himanshu,
>>>>
>>>> Which other drivers are you referring to?
>>>>
>>>> Thanks,
>>>>
>>>> Bart.
>>>
>>> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/scsi/lpfc/lpfc_attr.c
>>
>> Hello James,
>>
>> Please either move the information exported by lpfc_nvme_info_show() into
>> debugfs or split the information exported by that function into multiple
>> sysfs attributes. Otherwise you will get flamed by Greg KH as soon as he
>> encounters that code because of not following the rules explained in
>> Documentation/filesystems/sysfs.txt.
>>
>> Bart.
> 
> Better yet, is there some way we could get at least the connection
> information needed for discovery moved into the NVMe/FC transport
> layer so that we don't need to have separate implementations that
> have to be parsed?
> 
Sigh.

It's already been solved, there's no need for this attribute.

nvme-fc already sends a uevent whenever a new nvme rport is attached,
(check nvme_fc_register_remoteport()->nvme_fc_signal_discovery_scan().

James Smart added a sysfs entry for retriggering these uevents, and I've 
posted a set of scripts (... at least I think I did :-) utilizing those.

Please do _NOT_ do this; better rely on common mechanisms here.

And please, next time please cc linux-nvme so that we can track it better.

Cheers,

Hannes
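
For context, a simplified sketch of the transport mechanism Hannes refers to: the
uevent that nvme-fc raises when a remote port is registered. This is a paraphrase
of drivers/nvme/host/fc.c from memory, so exact variable names and error handling
may differ from the upstream code:

static void
nvme_fc_signal_discovery_scan(struct nvme_fc_lport *lport,
		struct nvme_fc_rport *rport)
{
	char *envp[4] = { "FC_EVENT=nvmediscovery", NULL, NULL, NULL };

	if (!fc_udev_device)
		return;

	envp[1] = kasprintf(GFP_KERNEL,
	    "NVMEFC_HOST_TRADDR=nn-0x%016llx:pn-0x%016llx",
	    lport->localport.node_name, lport->localport.port_name);
	envp[2] = kasprintf(GFP_KERNEL,
	    "NVMEFC_TRADDR=nn-0x%016llx:pn-0x%016llx",
	    rport->remoteport.node_name, rport->remoteport.port_name);

	/* Broadcast to udev; a rule can hand these straight to nvme-cli. */
	kobject_uevent_env(&fc_udev_device->kobj, KOBJ_CHANGE, envp);

	kfree(envp[1]);
	kfree(envp[2]);
}

A udev rule matching FC_EVENT=nvmediscovery therefore already receives the same
host-traddr/traddr pair that the proposed qla2xxx attribute exports.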
Ewan Milne Nov. 15, 2018, 4:40 p.m. UTC | #8
On Thu, 2018-11-15 at 13:52 +0100, Hannes Reinecke wrote:
> On 11/14/18 6:13 PM, Ewan D. Milne wrote:
> > On Tue, 2018-11-13 at 09:49 -0800, Bart Van Assche wrote:
> > > On Tue, 2018-11-13 at 17:38 +0000, Madhani, Himanshu wrote:
> > > > On Nov 13, 2018, at 6:23 AM, Bart Van Assche <bvanassche@acm.org> wrote:
> > > > > On Tue, 2018-11-13 at 01:02 +0000, Madhani, Himanshu wrote:
> > > > > > I see other drivers also use similar information populated for NVMe
> > > > > > Connection at boot time.
> > > > > 
> > > > > Hi Himanshu,
> > > > > 
> > > > > Which other drivers are you referring to?
> > > > > 
> > > > > Thanks,
> > > > > 
> > > > > Bart.
> > > > 
> > > > https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/scsi/lpfc/lpfc_attr.c
> > > 
> > > Hello James,
> > > 
> > > Please either move the information exported by lpfc_nvme_info_show() into
> > > debugfs or split the information exported by that function into multiple
> > > sysfs attributes. Otherwise you will get flamed by Greg KH as soon as he
> > > encounters that code because of not following the rules explained in
> > > Documentation/filesystems/sysfs.txt.
> > > 
> > > Bart.
> > 
> > Better yet, is there some way we could get at least the connection
> > information needed for discovery moved into the NVMe/FC transport
> > layer so that we don't need to have separate implementations that
> > have to be parsed?
> > 
> 
> Sigh.
> 
> It's already been solved, there's no need for this attribute.
> 
> nvme-fc already sends a uevent whenever a new nvme rport is attached,
> (check nvme_fc_register_remoteport()->nvme_fc_signal_discovery_scan().
> 
> James Smart added a sysfs entry for retriggering these uevents, and I've 
> posted a set of scripts (... at least I think I did :-) utilizing those.
> 
> Please do _NOT_ do this; better rely on common mechanisms here.
> 
> And please, next time please cc linux-nvme so that we can track it better.
> 
> Cheers,
> 
> Hannes

So, the uevent can take care of the discovery mechanism, however there
are other reasons why one would want to be able to know e.g. what the
WWPNs of the Initiator & Target ports of the NVMe connections are.
For example, we would typically want to capture this information when
troubleshooting a system remotely.  There are also SAN test environments
that have a need for this information, which is one of the reasons
for the original patch posting.  I don't think it matters too much how
we do it, but the information is needed somehow.

I suppose a udev rule could put the information into a file, but that
seems awkward, and the information needed is in kernel objects, so why
not make it visible?

The uevent is of course necessary as it signifies an event, but the
desired info here is the *state*.

-Ewan
James Smart Nov. 15, 2018, 9:47 p.m. UTC | #9
On 11/15/2018 4:52 AM, Hannes Reinecke wrote:
> On 11/14/18 6:13 PM, Ewan D. Milne wrote:
>> On Tue, 2018-11-13 at 09:49 -0800, Bart Van Assche wrote:
>>> On Tue, 2018-11-13 at 17:38 +0000, Madhani, Himanshu wrote:
>>>> On Nov 13, 2018, at 6:23 AM, Bart Van Assche <bvanassche@acm.org> 
>>>> wrote:
>>>>> On Tue, 2018-11-13 at 01:02 +0000, Madhani, Himanshu wrote:
>>>>>> I see other drivers also use similar information populated for NVMe
>>>>>> Connection at boot time.
>>>>>
>>>>> Hi Himanshu,
>>>>>
>>>>> Which other drivers are you referring to?
>>>>>
>>>>> Thanks,
>>>>>
>>>>> Bart.
>>>>
>>>> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/scsi/lpfc/lpfc_attr.c
>>>
>>> Hello James,
>>>
>>> Please either move the information exported by lpfc_nvme_info_show() 
>>> into
>>> debugfs or split the information exported by that function into 
>>> multiple
>>> sysfs attributes. Otherwise you will get flamed by Greg KH as soon 
>>> as he
>>> encounters that code because of not following the rules explained in
>>> Documentation/filesystems/sysfs.txt.
>>>
>>> Bart.
>>
>> Better yet, is there some way we could get at least the connection
>> information needed for discovery moved into the NVMe/FC transport
>> layer so that we don't need to have separate implementations that
>> have to be parsed?
>>
> Sigh.
>
> It's already been solved, there's no need for this attribute.
>
> nvme-fc already sends a uevent whenever a new nvme rport is attached,
> (check nvme_fc_register_remoteport()->nvme_fc_signal_discovery_scan().
>
> James Smart added a sysfs entry for retriggering these uevents, and 
> I've posted a set of scripts (... at least I think I did :-) utilizing 
> those.
>
> Please do _NOT_ do this; better rely on common mechanisms here.
>
> And please, next time please cc linux-nvme so that we can track it 
> better.
>
> Cheers,
>
> Hannes

Yes - the attribute in lpfc needs to go. It's currently a temporary,
informational-only thing until we merge the nvme and scsi transports.

It *is not* used for autoconnect.  The nvme fc transport automatically 
generates udev events upon nvme remoteport registrations and, as Hannes 
points out, we added a transport attribute to regenerate connect events 
for a host:
http://git.infradead.org/nvme.git/commit/97faec531460c949d7120672b8c77e2f41f8d6d7

-- james
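
As a usage illustration, retriggering those connect uevents from user space amounts
to a single write to the transport attribute added by the commit above. The path
below is an assumption based on that commit, so verify it on the running kernel:

#include <stdio.h>

int main(void)
{
	/* Assumed location of the retrigger attribute from the commit above. */
	const char *path = "/sys/class/fc/fc_udev_device/nvme_discovery";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	/* The written value is ignored; any write replays the discovery uevents. */
	fputs("1\n", f);
	fclose(f);
	return 0;
}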
James Smart Nov. 15, 2018, 9:49 p.m. UTC | #10
On 11/15/2018 8:43 AM, Ewan D. Milne wrote:
> On Thu, 2018-11-15 at 13:52 +0100, Hannes Reinecke wrote:
>> On 11/14/18 6:13 PM, Ewan D. Milne wrote:
>>> On Tue, 2018-11-13 at 09:49 -0800, Bart Van Assche wrote:
>>>> On Tue, 2018-11-13 at 17:38 +0000, Madhani, Himanshu wrote:
>>>>> On Nov 13, 2018, at 6:23 AM, Bart Van Assche <bvanassche@acm.org> wrote:
>>>>>> On Tue, 2018-11-13 at 01:02 +0000, Madhani, Himanshu wrote:
>>>>>>> I see other drivers also use similar information populated for NVMe
>>>>>>> Connection at boot time.
>>>>>> Hi Himanshu,
>>>>>>
>>>>>> Which other drivers are you referring to?
>>>>>>
>>>>>> Thanks,
>>>>>>
>>>>>> Bart.
>>>>> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/scsi/lpfc/lpfc_attr.c
>>>> Hello James,
>>>>
>>>> Please either move the information exported by lpfc_nvme_info_show() into
>>>> debugfs or split the information exported by that function into multiple
>>>> sysfs attributes. Otherwise you will get flamed by Greg KH as soon as he
>>>> encounters that code because of not following the rules explained in
>>>> Documentation/filesystems/sysfs.txt.
>>>>
>>>> Bart.
>>> Better yet, is there some way we could get at least the connection
>>> information needed for discovery moved into the NVMe/FC transport
>>> layer so that we don't need to have separate implementations that
>>> have to be parsed?
>>>
>> Sigh.
>>
>> It's already been solved, there's no need for this attribute.
>>
>> nvme-fc already sends a uevent whenever a new nvme rport is attached,
>> (check nvme_fc_register_remoteport()->nvme_fc_signal_discovery_scan().
>>
>> James Smart added a sysfs entry for retriggering these uevents, and I've
>> posted a set of scripts (... at least I think I did :-) utilizing those.
>>
>> Please do _NOT_ do this; better rely on common mechanisms here.
>>
>> And please, next time please cc linux-nvme so that we can track it better.
>>
>> Cheers,
>>
>> Hannes
> So, the uevent can take care of the discovery mechanism, however there
> are other reasons why one would want to be able to know e.g. what the
> WWPNs of the Initiator & Target ports of the NVMe connections are.
> For example, we would typically want to capture this information when
> troubleshooting a system remotely.  There are also SAN test environments
> that have a need for this information, which is one of the reasons
> for the original patch posting.  I don't think it matters too much how
> we do it, but the information is needed somehow.
>
> I suppose a udev rule could put the information into a file, but that
> seems awkward, and the information needed is in kernel objects, so why
> not make it visible?
>
> The uevent is of course necessary as it signifies an event, but the
> desired info here is the *state*.
>
> -Ewan
>

Well - that's the intent of merging the nvme fc and scsi fc transports 
into a fc-specific transport.  I'd be glad to have help doing this, but 
it has taken a lower priority.  Right now, for lpfc, I instruct everyone 
to use the scsi fc transport port structures for state.

-- james
Hannes Reinecke Nov. 16, 2018, 7:18 a.m. UTC | #11
On 11/15/18 5:40 PM, Ewan D. Milne wrote:
> On Thu, 2018-11-15 at 13:52 +0100, Hannes Reinecke wrote:
>> On 11/14/18 6:13 PM, Ewan D. Milne wrote:
>>> On Tue, 2018-11-13 at 09:49 -0800, Bart Van Assche wrote:
>>>> On Tue, 2018-11-13 at 17:38 +0000, Madhani, Himanshu wrote:
>>>>> On Nov 13, 2018, at 6:23 AM, Bart Van Assche <bvanassche@acm.org> wrote:
>>>>>> On Tue, 2018-11-13 at 01:02 +0000, Madhani, Himanshu wrote:
>>>>>>> I see other drivers also use similar information populated for NVMe
>>>>>>> Connection at boot time.
>>>>>>
>>>>>> Hi Himanshu,
>>>>>>
>>>>>> Which other drivers are you referring to?
>>>>>>
>>>>>> Thanks,
>>>>>>
>>>>>> Bart.
>>>>>
>>>>> https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/scsi/lpfc/lpfc_attr.c
>>>>
>>>> Hello James,
>>>>
>>>> Please either move the information exported by lpfc_nvme_info_show() into
>>>> debugfs or split the information exported by that function into multiple
>>>> sysfs attributes. Otherwise you will get flamed by Greg KH as soon as he
>>>> encounters that code because of not following the rules explained in
>>>> Documentation/filesystems/sysfs.txt.
>>>>
>>>> Bart.
>>>
>>> Better yet, is there some way we could get at least the connection
>>> information needed for discovery moved into the NVMe/FC transport
>>> layer so that we don't need to have separate implementations that
>>> have to be parsed?
>>>
>>
>> Sigh.
>>
>> It's already been solved, there's no need for this attribute.
>>
>> nvme-fc already sends a uevent whenever a new nvme rport is attached,
>> (check nvme_fc_register_remoteport()->nvme_fc_signal_discovery_scan().
>>
>> James Smart added a sysfs entry for retriggering these uevents, and I've
>> posted a set of scripts (... at least I think I did :-) utilizing those.
>>
>> Please do _NOT_ do this; better rely on common mechanisms here.
>>
>> And please, next time please cc linux-nvme so that we can track it better.
>>
>> Cheers,
>>
>> Hannes
> 
> So, the uevent can take care of the discovery mechanism, however there
> are other reasons why one would want to be able to know e.g. what the
> WWPNs of the Initiator & Target ports of the NVMe connections are.
> For example, we would typically want to capture this information when
> troubleshooting a system remotely.  There are also SAN test environments
> that have a need for this information, which is one of the reasons
> for the original patch posting.  I don't think it matters too much how
> we do it, but the information is needed somehow.
> 
> I suppose a udev rule could put the information into a file, but that
> seems awkward, and the information needed is in kernel objects, so why
> not make it visible?
> 
> The uevent is of course necessary as it signifies an event, but the
> desired info here is the *state*.
> 
I somewhat concur here in that we really do need some sysfs entry 
holding the remote port information.
However, lumping this all into one file and locating that file in the 
_SCSI_ host sysfs directory is _so_ fundamentally wrong :-)

What we should be doing is coming up with an abstraction here, allowing 
for 'real' FC remote port sysfs entries for NVMe.
At the moment FC-NVMe has loads of internal structures (cf 
nvme_fc_lport, nvme_fc_rport) which are never _ever_ mirrored in sysfs.
And that is the real issue we should be addressing; once we have these 
structures in sysfs we won't need to invest in some band-aids like the 
'info' fields.

Cheers,

Hannes
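
As a sketch of the direction Hannes describes (per-remote-port sysfs objects with
one value per attribute), something like the following could live in the FC-NVMe
transport. None of the structures or attribute names below exist today; they only
illustrate the shape of such an abstraction:

struct nvme_fc_rport_dev {
	struct device dev;		/* one sysfs device per remote port */
	struct nvme_fc_remote_port *remoteport;
};

static ssize_t
port_name_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvme_fc_rport_dev *r =
		container_of(dev, struct nvme_fc_rport_dev, dev);

	return scnprintf(buf, PAGE_SIZE, "0x%016llx\n",
	    r->remoteport->port_name);
}
static DEVICE_ATTR_RO(port_name);

static ssize_t
port_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvme_fc_rport_dev *r =
		container_of(dev, struct nvme_fc_rport_dev, dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
	    r->remoteport->port_state == FC_OBJSTATE_ONLINE ?
	    "Online" : "Unknown");
}
static DEVICE_ATTR_RO(port_state);

With an abstraction like this in the transport, both lpfc and qla2xxx could drop
their driver-private "info" strings, and user space would read the state one value
per file.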
diff mbox series

Patch

diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 678aff5ca947..323a4aa35f16 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1665,6 +1665,125 @@  qla2x00_max_speed_sup_show(struct device *dev, struct device_attribute *attr,
 	    ha->max_speed_sup ? "32Gps" : "16Gps");
 }
 
+static ssize_t
+qla27xx_nvme_connect_str_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+	struct nvme_fc_remote_port *rport;
+	struct nvme_fc_local_port *lport;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_nvme_rport *qla_rport, *trport;
+	fc_port_t *fcport;
+	char temp[150] = {0};
+	char *rportstate = "";
+
+	if (!IS_QLA27XX(ha) && !IS_QLA28XX(ha))
+		return scnprintf(buf, PAGE_SIZE, "\n");
+
+	if (!vha->flags.nvme_enabled)
+		return scnprintf(buf, PAGE_SIZE, "%s\n",
+		    "FC-NVMe is not enabled");
+
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (!fcport) {
+			scnprintf(buf, PAGE_SIZE, "No FC host\n");
+			return strlen(buf);
+		}
+
+		if (!vha->nvme_local_port) {
+			scnprintf(buf, PAGE_SIZE,
+			    "FC-NVMe Initiator on 0x%16llx not registered.\n",
+			    wwn_to_u64(fcport->port_name));
+			return strlen(buf);
+		}
+
+		list_for_each_entry_safe(qla_rport, trport,
+		    &vha->nvme_rport_list, list) {
+			if (qla_rport->fcport == fcport) {
+				rport = fcport->nvme_remote_port;
+
+				lport = vha->nvme_local_port;
+
+				scnprintf(temp, sizeof(temp),
+				    "FC-NVMe LPORT: host%ld nn-0x%16llx:pn-0x%16llx port_id %06x %s\n",
+				    vha->host_no, lport->node_name,
+				    lport->port_name, lport->port_id, "ONLINE");
+
+				if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE)
+					goto done;
+
+				scnprintf(temp, sizeof(temp),
+				    "FC-NVMe RPORT: host%ld nn-0x%llx:pn-0x%llx port_id %06x ",
+				    vha->host_no, rport->node_name,
+				    rport->port_name, rport->port_id);
+
+				/* Find out Rport State */
+				if (rport->port_state & FC_OBJSTATE_ONLINE)
+					rportstate = "ONLINE";
+
+				if (rport->port_state & FC_OBJSTATE_UNKNOWN)
+					rportstate = "UNKNOWN";
+
+				if (rport->port_state & ~(FC_OBJSTATE_ONLINE |
+				    FC_OBJSTATE_UNKNOWN))
+					rportstate = "UNSUPPORTED";
+
+				if (strlcat(buf, temp, PAGE_SIZE) >=
+				    PAGE_SIZE)
+					goto done;
+
+				if (rport->port_role &
+				    (FC_PORT_ROLE_NVME_INITIATOR |
+				      FC_PORT_ROLE_NVME_TARGET |
+				      FC_PORT_ROLE_NVME_DISCOVERY)) {
+					if (rport->port_role &
+					    FC_PORT_ROLE_NVME_INITIATOR)
+						if (strlcat(buf, "INITIATOR ",
+						    PAGE_SIZE) >= PAGE_SIZE)
+							goto done;
+
+					if (rport->port_role &
+					    FC_PORT_ROLE_NVME_TARGET)
+						if (strlcat(buf, "TARGET ",
+						    PAGE_SIZE) >= PAGE_SIZE)
+							goto done;
+
+					if (rport->port_role &
+					    FC_PORT_ROLE_NVME_DISCOVERY)
+						if (strlcat(buf, "DISCOVERY ",
+						    PAGE_SIZE) >= PAGE_SIZE)
+							goto done;
+				} else {
+					if (strlcat(buf, "UNKNOWN_ROLE ",
+					    PAGE_SIZE) >= PAGE_SIZE)
+						goto done;
+				}
+				scnprintf(temp, sizeof(temp), "%s\n", rportstate);
+
+				if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE)
+					goto done;
+
+				scnprintf(temp, sizeof(temp),
+				    "NVMECLI: host-traddr=nn-0x%16llx:pn-0x%16llx traddr=nn-0x%16llx:pn-0x%16llx\n",
+				    lport->node_name, lport->port_name,
+				    rport->node_name, rport->port_name);
+
+				if (strlcat(buf, temp, PAGE_SIZE) >= PAGE_SIZE)
+					goto done;
+			}
+		}
+	}
+
+	return strlen(buf);
+
+done:
+	ql_log(ql_log_warn, vha, 0xffff,
+	    "NVME connect string buffer size 0x%lx exceeds 0x%lx\n",
+	    sizeof(*buf), PAGE_SIZE);
+	return strlen(buf);
+}
+
 /* ----- */
 
 static ssize_t
@@ -2145,7 +2264,7 @@  static DEVICE_ATTR(zio_threshold, 0644,
 static DEVICE_ATTR_RW(qlini_mode);
 static DEVICE_ATTR_RW(ql2xexchoffld);
 static DEVICE_ATTR_RW(ql2xiniexchg);
-
+static DEVICE_ATTR(nvme_connect_str, 0444, qla27xx_nvme_connect_str_show, NULL);
 
 struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_driver_version,
@@ -2183,6 +2302,7 @@  struct device_attribute *qla2x00_host_attrs[] = {
 	&dev_attr_min_link_speed,
 	&dev_attr_max_speed_sup,
 	&dev_attr_zio_threshold,
+	&dev_attr_nvme_connect_str,
 	NULL, /* reserve for qlini_mode */
 	NULL, /* reserve for ql2xiniexchg */
 	NULL, /* reserve for ql2xexchoffld */