diff mbox

[3/6] qla2xxx: Add FC-NVMe F/W initialization and transport registration

Message ID 20170616224744.3681-4-himanshu.madhani@cavium.com (mailing list archive)
State Changes Requested, archived
Headers show

Commit Message

Madhani, Himanshu June 16, 2017, 10:47 p.m. UTC
From: Duane Grigsby <duane.grigsby@cavium.com>

This code provides the interfaces to register remote and local ports
of FC4 type 0x28 with the FC-NVMe transport and transports the
requests (FC-NVMe FC link services and FC-NVMe commands IUs) to the
fabric. It also provides the support for allocating h/w queues and
aborting FC-NVMe FC requests.

Signed-off-by: Darren Trapp <darren.trapp@cavium.com>
Signed-off-by: Duane Grigsby <duane.grigsby@cavium.com>
Signed-off-by: Anil Gurumurthy <anil.gurumurhty@cavium.com>
Signed-off-by: Giridhar Malavali <giridhar.malavali@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
---
 drivers/scsi/qla2xxx/Makefile   |   2 +-
 drivers/scsi/qla2xxx/qla_dbg.c  |   2 +-
 drivers/scsi/qla2xxx/qla_def.h  |   3 +
 drivers/scsi/qla2xxx/qla_gbl.h  |  27 ++
 drivers/scsi/qla2xxx/qla_init.c |   8 +
 drivers/scsi/qla2xxx/qla_iocb.c |  36 ++
 drivers/scsi/qla2xxx/qla_isr.c  |  19 +
 drivers/scsi/qla2xxx/qla_mbx.c  |  22 ++
 drivers/scsi/qla2xxx/qla_nvme.c | 797 ++++++++++++++++++++++++++++++++++++++++
 drivers/scsi/qla2xxx/qla_nvme.h | 132 +++++++
 drivers/scsi/qla2xxx/qla_os.c   |  40 +-
 11 files changed, 1079 insertions(+), 9 deletions(-)
 create mode 100644 drivers/scsi/qla2xxx/qla_nvme.c
 create mode 100644 drivers/scsi/qla2xxx/qla_nvme.h

Comments

Johannes Thumshirn June 19, 2017, 10:01 a.m. UTC | #1
On Fri, Jun 16, 2017 at 03:47:41PM -0700, Himanshu Madhani wrote:
[...]
>  /*
> + * Global functions prototype in qla_nvme.c source file.
> + */
> +extern void qla_nvme_register_hba(scsi_qla_host_t *);
> +extern int  qla_nvme_register_remote(scsi_qla_host_t *, fc_port_t *);
> +extern void qla_nvme_delete(scsi_qla_host_t *);
> +extern void qla_nvme_abort(struct qla_hw_data *, srb_t *sp);
> +extern void qla_nvme_unregister_remote_port(struct work_struct *);
> +extern int qla_nvme_wait_on_rport_del(fc_port_t *);
> +extern void qla_nvme_abort_all(fc_port_t *);
> +extern int qla_nvme_post_cmd(struct nvme_fc_local_port *,
> +    struct nvme_fc_remote_port *, void *, struct nvmefc_fcp_req *);
> +extern int qla_nvme_alloc_queue(struct nvme_fc_local_port *, unsigned int,
> +    u16, void **);
> +extern int qla_nvme_hba_scan(scsi_qla_host_t *);
> +extern void qla_nvme_ls_abort(struct nvme_fc_local_port *,
> +    struct nvme_fc_remote_port *, struct nvmefc_ls_req *);
> +extern int qla_nvme_ls_req(struct nvme_fc_local_port *,
> +    struct nvme_fc_remote_port *, struct nvmefc_ls_req *);
> +extern void qla_nvme_poll(struct nvme_fc_local_port *, void *);
> +extern int qla2x00_start_nvme_mq(srb_t *);
> +extern void qla_nvme_fcp_abort(struct nvme_fc_local_port *,
> +    struct nvme_fc_remote_port *, void *, struct nvmefc_fcp_req *);
> +extern void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *, struct pt_ls4_request *,
> +    struct req_que *);

Can't these go into a header instead of using extern?

[...]

> @@ -4662,6 +4667,9 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
>  			break;
>  	} while (0);
>  
> +	if ((!vha->nvme_local_port) && (vha->flags.nvme_enabled))


[...]

> +		if ((ql2xnvmeenable) && IS_QLA27XX(ha))
> +			mcp->mb[4] |= NVME_ENABLE_FLAG;
> +

[...]

> +
> +		/* bit 26 of fw_attributes */
> +		if ((ha->fw_attributes_h & 0x400) && (ql2xnvmeenable)) {

Superfluous parenthesis.

[...]

> +			vha->flags.nvme_enabled = 1;
> +			icb->firmware_options_2 &=
> +			    cpu_to_le32(~(BIT_0 | BIT_1 | BIT_2 | BIT_3));

0xfffffff0 or maybe even ~0xf?

[...]

> +	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req,
> +	    &fcport->nvme_remote_port);
> +	if (ret) {
> +		ql_log(ql_log_warn, vha, 0x212e,
> +		    "Failed to register remote port. Transport returned %d\n",
> +		    ret);
> +		return ret;
> +	}
> +
> +	fcport->nvme_remote_port->private = fcport;

A FC-NVMe remote port can be in use right after a call to
nvme_fc_register_remoteport(), if I'm not mistaken. So you should add the
fcport to the nvme_remote_port->private _before_ calling
nvme_fc_register_remoteport().

> +	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
> +	atomic_set(&fcport->nvme_ref_count, 1);
> +	rport->fcport = fcport;
> +	list_add_tail(&rport->list, &vha->nvme_rport_list);
> +#endif
> +	return 0;
> +}
> +
> +/*
> + * Perform a scan and register those ports with FC-NVMe transport
> + * input:  HBA handle
> + */
> +int qla_nvme_hba_scan(scsi_qla_host_t *vha)

This should be bool.

> +{
> +#if (IS_ENABLED(CONFIG_NVME_FC))
> +	fc_port_t	*fcport;
> +	uint8_t not_found = 1;

bool found = false;

> +
> +	/*
> +	 * We are using the first HBA found
> +	 * Find matching fcport
> +	 */
> +	list_for_each_entry(fcport, &vha->vp_fcports, list) {
> +		if ((fcport->fc4f_nvme) &&
> +		    (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))) {
> +			qla_nvme_register_remote(vha, fcport);
> +			not_found = 0;

found = true;

> +		}
> +	}
> +
> +	return not_found;

return found;

> +#else
> +	return 1;

return false;

> +#endif
> +}

And update the call-sites.

[...]

> +	if (unlikely((nvme->u.nvme.comp_status) || res))

Parenthesis again.

[...]

> +void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
> +    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
> +{
> +	struct nvme_private *priv = fd->private;
> +	fc_port_t *fcport = rport->private;
> +	srb_t *sp = priv->sp;
> +	int rval;
> +	struct qla_hw_data *ha;
> +
> +	ql_log(ql_log_info, fcport->vha, 0x212b,
> +	    "%s: aborting sp:%p on fcport:%p\n", __func__, sp, fcport);
> +
> +	ha = fcport->vha->hw;
> +	rval = ha->isp_ops->abort_command(sp);
> +	if (rval != QLA_SUCCESS)
> +		ql_log(ql_log_warn, fcport->vha, 0x2125,
> +		    "%s: failed to abort LS command for SP:%p rval=%x\n",
> +		    __func__, sp, rval);
> +}

I think qla_nvme_ls_abort() is a bit too verbose, it nearly only consists of
logging and thus hides the call to abort_command() from the reader.

> +
> +static void qla_nvme_ls_complete(struct work_struct *work)
> +{
> +	struct nvme_private *priv =
> +	    container_of(work, struct nvme_private, ls_work);
> +	struct nvmefc_ls_req *fd = priv->fd;
> +
> +	(*fd->done)(fd, priv->comp_status);

Can't we get the function pointer to done directly? This LISP style doesn't
really look too nice:

	done(fd, priv->comp_status); 
or maybe
	fd->done(fd, priv->comp_status);

> +
> +int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
> +    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
> +{
> +	fc_port_t *fcport;
> +	struct srb_iocb   *nvme;
> +	struct scsi_qla_host *vha;
> +	int     rval = QLA_FUNCTION_FAILED;
> +	struct qla_hw_data *ha;
> +	srb_t           *sp;
> +	struct nvme_private *priv;
> +
> +	if (!fd) {
> +		ql_log(ql_log_warn, NULL, 0x2135, "NO NVMe FCP reqeust\n");
> +		return rval;
> +	}

Just curious, how likely is this? I.e. have you hit the case in debugging?

> +
> +	priv = fd->private;
> +	fcport = (fc_port_t *)rport->private;
> +	if (!fcport) {
> +		ql_log(ql_log_warn, NULL, 0x212c,
> +		    "%s: No fcport ptr\n", __func__);
> +		return rval;
> +	}

Same here.

[...]

> +
> +void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
> +    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
> +    struct nvmefc_fcp_req *fd)

The comment from the other abort function above applies here as well.

> +void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
> +{
> +	struct scsi_qla_host *vha = lport->private;
> +	unsigned long flags;
> +	struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
> +
> +	/* Acquire ring specific lock */
> +	spin_lock_irqsave(&qpair->qp_lock, flags);
> +	qla24xx_process_response_queue(vha, qpair->rsp);
> +	spin_unlock_irqrestore(&qpair->qp_lock, flags);

Just a side note, I _think_ needing a spin_lock here is a design issue.

> +	/* Acquire qpair specific lock */
> +	spin_lock_irqsave(&qpair->qp_lock, flags);

Same here. What is this lock protecting? I see qla_qpair does have a
reference count (not kref backed btw), so if you're only protecting
against the structure vanishing under your feet, you could change
this to krefs. If you're protecting structure members the use of RCUs
spring to mind. rwlocks as well, if the content that is protected is
read-mostly. But please note, reworking this requires extensive testing. But a
spin_lock_irqsave() in the I/O path raises a red-flag for me.

[...]

> +
> +static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
> +{
> +	struct scsi_qla_host *vha = lport->private;
> +
> +	ql_log(ql_log_info, vha, 0x210f,
> +	    "localport delete of %p completed.\n", vha->nvme_local_port);
> +
> +	vha->nvme_local_port = NULL;
> +	atomic_dec(&vha->nvme_ref_count);

Nit, the debug message says completed, but you haven't "completed" the delete
;-).

> +}
> +
> +static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
> +{
> +	fc_port_t *fcport;
> +	struct nvme_rport *r_port, *trport;
> +
> +	fcport = (fc_port_t *)rport->private;
> +	fcport->nvme_remote_port = NULL;
> +	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
> +	atomic_dec(&fcport->nvme_ref_count);
> +
> +	ql_log(ql_log_info, fcport->vha, 0x2110,
> +	    "remoteport_delete of %p completed.\n", fcport);
> +
> +	list_for_each_entry_safe(r_port, trport,
> +	    &fcport->vha->nvme_rport_list, list) {
> +		if (r_port->fcport == fcport) {
> +			list_del(&r_port->list);
> +			break;
> +		}
> +	}
> +	kfree(r_port);

Ditto.

[..]

> +static int qla_nvme_wait_on_command(srb_t *sp)
> +{
> +#define NVME_ABORT_POLLING_PERIOD    1000
> +#define NVME_ABORT_WAIT_ITER         ((2 * 1000) / (NVME_ABORT_POLLING_PERIOD))
> +	unsigned long wait_iter = NVME_ABORT_WAIT_ITER;
> +	int ret = QLA_SUCCESS;
> +
> +	while ((atomic_read(&sp->ref_count) > 1) && wait_iter--)
> +		msleep(NVME_ABORT_POLLING_PERIOD);
> +
> +	if (atomic_read(&sp->ref_count) > 1)
> +		ret = QLA_FUNCTION_FAILED;
> +
> +	return ret;

wait_event_timeout()?

> +int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
> +{
> +	unsigned long wait_iter = NVME_ABORT_WAIT_ITER;
> +	int ret = QLA_SUCCESS;
> +
> +	while (atomic_read(&fcport->nvme_ref_count) && wait_iter--)
> +		msleep(NVME_ABORT_POLLING_PERIOD);

Ditto.

[...]

> +void qla_nvme_abort(struct qla_hw_data *ha, srb_t *sp)
> +{
> +	int rval;
> +
> +	rval = ha->isp_ops->abort_command(sp);
> +	if (!rval) {

	if (!rval && !qla_nvme_wait_on_command(sp))
		ql_log(ql_log_warn, NULL, 0x2112,
			"nvme_wait_on_command timed out waiting on sp=%p\n",
			sp);

[...]

> +void qla_nvme_delete(scsi_qla_host_t *vha)
> +{
> +#if (IS_ENABLED(CONFIG_NVME_FC))
> +	struct nvme_rport *rport, *trport;
> +	fc_port_t *fcport;
> +
> +	if (!list_empty(&vha->nvme_rport_list)) {
> +		list_for_each_entry_safe(rport, trport,
> +		    &vha->nvme_rport_list, list) {

Do you really need the if (!list_empty($list)) call before the
list_for_each_entry_safe()?


> +			fcport = rport->fcport;
> +			ql_log(ql_log_info, fcport->vha, 0x2114,
> +			    "%s: fcport=%p\n", __func__, fcport);
> +			nvme_fc_unregister_remoteport(
> +			    fcport->nvme_remote_port);
> +			qla_nvme_wait_on_rport_del(fcport);
> +			qla_nvme_abort_all(fcport);
> +		}
> +	}
> +
> +	if (vha->nvme_local_port) {
> +		ql_log(ql_log_info, vha, 0x2115,
> +		    "unregister localport = %p\n", vha->nvme_local_port);
> +		nvme_fc_unregister_localport(vha->nvme_local_port);
> +	}
> +
> +	while (atomic_read(&vha->nvme_ref_count))
> +		schedule();

Ugh..., no!
wait_event()?

[...]

> +
> +	ret = nvme_fc_register_localport(&pinfo, tmpl,
> +	    get_device(&ha->pdev->dev), &vha->nvme_local_port);

[...]

> +	vha->nvme_local_port->private = vha;

Please move above the nvme_fc_register_localport().

[...]

> +#if (IS_ENABLED(CONFIG_NVME_FC))
> +int ql2xnvmeenable = 1;
> +#else
> +int ql2xnvmeenable;
> +#endif
> +module_param(ql2xnvmeenable, int, 0644);
> +MODULE_PARM_DESC(ql2xnvmeenable,
> +    "Enables NVME support"
> +    "0 - no NVMe. Default is N");
> +

The message is a bit misleading. NVMe is enabled *IFF* CONFIG_NVME_FC is
enabled.
Madhani, Himanshu June 19, 2017, 6:52 p.m. UTC | #2
> On Jun 19, 2017, at 3:01 AM, Johannes Thumshirn <jthumshirn@suse.de> wrote:
> 
> On Fri, Jun 16, 2017 at 03:47:41PM -0700, Himanshu Madhani wrote:
> [...]
>> /*
>> + * Global functions prototype in qla_nvme.c source file.
>> + */
>> +extern void qla_nvme_register_hba(scsi_qla_host_t *);
>> +extern int  qla_nvme_register_remote(scsi_qla_host_t *, fc_port_t *);
>> +extern void qla_nvme_delete(scsi_qla_host_t *);
>> +extern void qla_nvme_abort(struct qla_hw_data *, srb_t *sp);
>> +extern void qla_nvme_unregister_remote_port(struct work_struct *);
>> +extern int qla_nvme_wait_on_rport_del(fc_port_t *);
>> +extern void qla_nvme_abort_all(fc_port_t *);
>> +extern int qla_nvme_post_cmd(struct nvme_fc_local_port *,
>> +    struct nvme_fc_remote_port *, void *, struct nvmefc_fcp_req *);
>> +extern int qla_nvme_alloc_queue(struct nvme_fc_local_port *, unsigned int,
>> +    u16, void **);
>> +extern int qla_nvme_hba_scan(scsi_qla_host_t *);
>> +extern void qla_nvme_ls_abort(struct nvme_fc_local_port *,
>> +    struct nvme_fc_remote_port *, struct nvmefc_ls_req *);
>> +extern int qla_nvme_ls_req(struct nvme_fc_local_port *,
>> +    struct nvme_fc_remote_port *, struct nvmefc_ls_req *);
>> +extern void qla_nvme_poll(struct nvme_fc_local_port *, void *);
>> +extern int qla2x00_start_nvme_mq(srb_t *);
>> +extern void qla_nvme_fcp_abort(struct nvme_fc_local_port *,
>> +    struct nvme_fc_remote_port *, void *, struct nvmefc_fcp_req *);
>> +extern void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *, struct pt_ls4_request *,
>> +    struct req_que *);
> 
> Can't these go into a header instead of using extern?
> 

Sure.

> [...]
> 
>> @@ -4662,6 +4667,9 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
>> 			break;
>> 	} while (0);
>> 
>> +	if ((!vha->nvme_local_port) && (vha->flags.nvme_enabled))
> 
> 
> [...]
> 
>> +		if ((ql2xnvmeenable) && IS_QLA27XX(ha))
>> +			mcp->mb[4] |= NVME_ENABLE_FLAG;
>> +
> 
> [...]
> 
>> +
>> +		/* bit 26 of fw_attributes */
>> +		if ((ha->fw_attributes_h & 0x400) && (ql2xnvmeenable)) {
> 
> Superfluous parenthesis.
> 

Good catch. will fix in v2

> [...]
> 
>> +			vha->flags.nvme_enabled = 1;
>> +			icb->firmware_options_2 &=
>> +			    cpu_to_le32(~(BIT_0 | BIT_1 | BIT_2 | BIT_3));
> 
> 0xfffffff0 or maybe even ~0xf?
> 

will fix in v2

> [...]
> 
>> +	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req,
>> +	    &fcport->nvme_remote_port);
>> +	if (ret) {
>> +		ql_log(ql_log_warn, vha, 0x212e,
>> +		    "Failed to register remote port. Transport returned %d\n",
>> +		    ret);
>> +		return ret;
>> +	}
>> +
>> +	fcport->nvme_remote_port->private = fcport;
> 
> A FC-NVMe remote port can be in used right after a call to
> nvme_fc_register_remoteport(), if I'm not mistaken. So you should add the
> fcport to the nvme_remote_port->private _before_ calling
> nvme_fc_register_remoteport().

Will check this out. 

> 
>> +	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
>> +	atomic_set(&fcport->nvme_ref_count, 1);
>> +	rport->fcport = fcport;
>> +	list_add_tail(&rport->list, &vha->nvme_rport_list);
>> +#endif
>> +	return 0;
>> +}
>> +
>> +/*
>> + * Perform a scan and register those ports with FC-NVMe transport
>> + * input:  HBA handle
>> + */
>> +int qla_nvme_hba_scan(scsi_qla_host_t *vha)
> 
> This should be bool.
> 
>> +{
>> +#if (IS_ENABLED(CONFIG_NVME_FC))
>> +	fc_port_t	*fcport;
>> +	uint8_t not_found = 1;
> 
> bool found = false;
> 
>> +
>> +	/*
>> +	 * We are using the first HBA found
>> +	 * Find matching fcport
>> +	 */
>> +	list_for_each_entry(fcport, &vha->vp_fcports, list) {
>> +		if ((fcport->fc4f_nvme) &&
>> +		    (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))) {
>> +			qla_nvme_register_remote(vha, fcport);
>> +			not_found = 0;
> 
> found = true;
> 
>> +		}
>> +	}
>> +
>> +	return not_found;
> 
> return found;
> 
>> +#else
>> +	return 1;
> 
> return false;
> 
>> +#endif
>> +}
> 
> And update the call-sites.
> 

Ack. 

> [...]
> 
>> +	if (unlikely((nvme->u.nvme.comp_status) || res))
> 
> Parenthesis again.
> 

Ack. 

> [...]
> 
>> +void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
>> +    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
>> +{
>> +	struct nvme_private *priv = fd->private;
>> +	fc_port_t *fcport = rport->private;
>> +	srb_t *sp = priv->sp;
>> +	int rval;
>> +	struct qla_hw_data *ha;
>> +
>> +	ql_log(ql_log_info, fcport->vha, 0x212b,
>> +	    "%s: aborting sp:%p on fcport:%p\n", __func__, sp, fcport);
>> +
>> +	ha = fcport->vha->hw;
>> +	rval = ha->isp_ops->abort_command(sp);
>> +	if (rval != QLA_SUCCESS)
>> +		ql_log(ql_log_warn, fcport->vha, 0x2125,
>> +		    "%s: failed to abort LS command for SP:%p rval=%x\n",
>> +		    __func__, sp, rval);
>> +}
> 
> I think qla_nvme_ls_abort() is a bit too verbose, it nearly only consists of
> logging and thus hides the call to abort_command() from the reader.
> 

Sure. will make it more readable.


>> +
>> +static void qla_nvme_ls_complete(struct work_struct *work)
>> +{
>> +	struct nvme_private *priv =
>> +	    container_of(work, struct nvme_private, ls_work);
>> +	struct nvmefc_ls_req *fd = priv->fd;
>> +
>> +	(*fd->done)(fd, priv->comp_status);
> 
> Can't we get the function pointer to done directly this LISP style doesn't
> really look too nice;
> 
> 	done(fd, priv->comp_status); 
> or maybe
> 	fd->done(fd, priv->comp_status);
> 

Sure. will fix this for v2.

>> +
>> +int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
>> +    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
>> +{
>> +	fc_port_t *fcport;
>> +	struct srb_iocb   *nvme;
>> +	struct scsi_qla_host *vha;
>> +	int     rval = QLA_FUNCTION_FAILED;
>> +	struct qla_hw_data *ha;
>> +	srb_t           *sp;
>> +	struct nvme_private *priv;
>> +
>> +	if (!fd) {
>> +		ql_log(ql_log_warn, NULL, 0x2135, "NO NVMe FCP reqeust\n");
>> +		return rval;
>> +	}
> 
> Just curious, how likely is this? I.e. have you hit the case in debugging?
> 

Defensive coding. This can be removed. 

>> +
>> +	priv = fd->private;
>> +	fcport = (fc_port_t *)rport->private;
>> +	if (!fcport) {
>> +		ql_log(ql_log_warn, NULL, 0x212c,
>> +		    "%s: No fcport ptr\n", __func__);
>> +		return rval;
>> +	}
> 
> Same here.
> 
> [...]
> 
>> +
>> +void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
>> +    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
>> +    struct nvmefc_fcp_req *fd)
> 
> The comment from the other abort function above applies here as well.
> 

Ack.

>> +void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
>> +{
>> +	struct scsi_qla_host *vha = lport->private;
>> +	unsigned long flags;
>> +	struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
>> +
>> +	/* Acquire ring specific lock */
>> +	spin_lock_irqsave(&qpair->qp_lock, flags);
>> +	qla24xx_process_response_queue(vha, qpair->rsp);
>> +	spin_unlock_irqrestore(&qpair->qp_lock, flags);
> 
> Just a side note, I _think_ needing a spin_lock here is a design issue.
> 
>> +	/* Acquire qpair specific lock */
>> +	spin_lock_irqsave(&qpair->qp_lock, flags);
> 
> Same here. What is this lock protecting? I see qla_qpair does have a
> reference count (not kref backed btw), so if you're only protecting
> against the structure vanishing under your feet, you could change
> this to krefs. If you're protecting structure members the use of RCUs
> spring to mind. rwlocks as well, if the content that is protected is
> read-mostly. But please note, reworking this requires extensive testing. But a
> spin_lock_irqsave() in the I/O path raises a red-flag for me.
> 

The lock here is protecting hardware queue resource of qla2xxx driver.

> [...]
> 
>> +
>> +static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
>> +{
>> +	struct scsi_qla_host *vha = lport->private;
>> +
>> +	ql_log(ql_log_info, vha, 0x210f,
>> +	    "localport delete of %p completed.\n", vha->nvme_local_port);
>> +
>> +	vha->nvme_local_port = NULL;
>> +	atomic_dec(&vha->nvme_ref_count);
> 
> Nit, the debug message says completed, but you haven't "completed" the delete
> ;-).
> 

:) good catch again 

>> +}
>> +
>> +static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
>> +{
>> +	fc_port_t *fcport;
>> +	struct nvme_rport *r_port, *trport;
>> +
>> +	fcport = (fc_port_t *)rport->private;
>> +	fcport->nvme_remote_port = NULL;
>> +	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
>> +	atomic_dec(&fcport->nvme_ref_count);
>> +
>> +	ql_log(ql_log_info, fcport->vha, 0x2110,
>> +	    "remoteport_delete of %p completed.\n", fcport);
>> +
>> +	list_for_each_entry_safe(r_port, trport,
>> +	    &fcport->vha->nvme_rport_list, list) {
>> +		if (r_port->fcport == fcport) {
>> +			list_del(&r_port->list);
>> +			break;
>> +		}
>> +	}
>> +	kfree(r_port);
> 
> Ditto.
> 

:) Ack

> [..]
> 
>> +static int qla_nvme_wait_on_command(srb_t *sp)
>> +{
>> +#define NVME_ABORT_POLLING_PERIOD    1000
>> +#define NVME_ABORT_WAIT_ITER         ((2 * 1000) / (NVME_ABORT_POLLING_PERIOD))
>> +	unsigned long wait_iter = NVME_ABORT_WAIT_ITER;
>> +	int ret = QLA_SUCCESS;
>> +
>> +	while ((atomic_read(&sp->ref_count) > 1) && wait_iter--)
>> +		msleep(NVME_ABORT_POLLING_PERIOD);
>> +
>> +	if (atomic_read(&sp->ref_count) > 1)
>> +		ret = QLA_FUNCTION_FAILED;
>> +
>> +	return ret;
> 
> wait_event_timeout()?
> 
will fix up in v2

>> +int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
>> +{
>> +	unsigned long wait_iter = NVME_ABORT_WAIT_ITER;
>> +	int ret = QLA_SUCCESS;
>> +
>> +	while (atomic_read(&fcport->nvme_ref_count) && wait_iter--)
>> +		msleep(NVME_ABORT_POLLING_PERIOD);
> 
> Ditto.
> 

Ack

> [...]
> 
>> +void qla_nvme_abort(struct qla_hw_data *ha, srb_t *sp)
>> +{
>> +	int rval;
>> +
>> +	rval = ha->isp_ops->abort_command(sp);
>> +	if (!rval) {
> 
> 	if (!rval && !qla_nvme_wait_on_command(sp))
> 		ql_log(ql_log_warn, NULL, 0x2112,
> 			"nvme_wait_on_comand timed out waiting on sp=%p\n",
> 			sp);
> 
> [...]
> 
> 
> 
> 
>> +			fcport = rport->fcport;
>> +			ql_log(ql_log_info, fcport->vha, 0x2114,
>> +			    "%s: fcport=%p\n", __func__, fcport);
>> +			nvme_fc_unregister_remoteport(
>> +			    fcport->nvme_remote_port);
>> +			qla_nvme_wait_on_rport_del(fcport);
>> +			qla_nvme_abort_all(fcport);
>> +		}
>> +	}
>> +
>> +	if (vha->nvme_local_port) {
>> +		ql_log(ql_log_info, vha, 0x2115,
>> +		    "unregister localport = %p\n", vha->nvme_local_port);
>> +		nvme_fc_unregister_localport(vha->nvme_local_port);
>> +	}
>> +
>> +	while (atomic_read(&vha->nvme_ref_count))
>> +		schedule();
> 
> Ugh..., no!
> wait_event()?
> 

Ack. will fix up in v2

> [...]
> 
>> +
>> +	ret = nvme_fc_register_localport(&pinfo, tmpl,
>> +	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
> 
> [...]
> 
>> +	vha->nvme_local_port->private = vha;
> 
> Please move above the nvme_fc_register_localport().
> 
> [...]
> 
>> +#if (IS_ENABLED(CONFIG_NVME_FC))
>> +int ql2xnvmeenable = 1;
>> +#else
>> +int ql2xnvmeenable;
>> +#endif
>> +module_param(ql2xnvmeenable, int, 0644);
>> +MODULE_PARM_DESC(ql2xnvmeenable,
>> +    "Enables NVME support"
>> +    "0 - no NVMe. Default is N");
>> +
> 
> The message is a bit misleading. NVMe is enabled *IFF* CONFIG_NVME_FC is
> enabled.
> 

Will fix up in v2.

> -- 
> Johannes Thumshirn                                          Storage
> jthumshirn@suse.de                                +49 911 74053 689
> SUSE LINUX GmbH, Maxfeldstr. 5, 90409 Nürnberg
> GF: Felix Imendörffer, Jane Smithard, Graham Norton
> HRB 21284 (AG Nürnberg)
> Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850

Thanks,
- Himanshu
James Smart June 19, 2017, 9:22 p.m. UTC | #3
On 6/16/2017 3:47 PM, Himanshu Madhani wrote:
> From: Duane Grigsby <duane.grigsby@cavium.com>
>
> This code provides the interfaces to register remote and local ports
> of FC4 type 0x28 with the FC-NVMe transport and transports the
> requests (FC-NVMe FC link services and FC-NVMe commands IUs) to the
> fabric. It also provides the support for allocating h/w queues and
> aborting FC-NVMe FC requests.
>
> Signed-off-by: Darren Trapp <darren.trapp@cavium.com>
> Signed-off-by: Duane Grigsby <duane.grigsby@cavium.com>
> Signed-off-by: Anil Gurumurthy <anil.gurumurhty@cavium.com>
> Signed-off-by: Giridhar Malavali <giridhar.malavali@cavium.com>
> Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
>
> +#if (IS_ENABLED(CONFIG_NVME_FC))
> +extern int nvme_fc_register_remoteport(struct nvme_fc_local_port *,
> +    struct nvme_fc_port_info *, struct nvme_fc_remote_port **);
> +extern int nvme_fc_register_localport(struct nvme_fc_port_info *,
> +    struct nvme_fc_port_template *, struct device *,
> +    struct nvme_fc_local_port **);
> +extern int nvme_fc_unregister_localport(struct nvme_fc_local_port *);
> +#endif

There should be no need for these. They are in the 
include/linux/nvme-fc-driver.h header.


> +static void qla_nvme_sp_ls_done(void *ptr, int res)
> +{
> +	srb_t *sp = ptr;
> +	struct srb_iocb *nvme;
> +	struct nvmefc_ls_req   *fd;
> +	struct nvme_private *priv;
> +
> +	if (atomic_read(&sp->ref_count) == 0) {
> +		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
> +		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
> +		return;
> +	}
> +
> +	if (!atomic_dec_and_test(&sp->ref_count))
> +		return;
> +
> +	if (res)
> +		res = NVME_SC_FC_TRANSPORT_ERROR;

LS request failure status should be -Exxx values, not nvme status codes 
(which don't apply to LS's).
The api probably wasn't super clear on this.

> +
> +	nvme = &sp->u.iocb_cmd;
> +	fd = nvme->u.nvme.desc;
> +	priv = fd->private;
> +	priv->comp_status = res;
> +	schedule_work(&priv->ls_work);
> +	/* work schedule doesn't need the sp */
> +	qla2x00_rel_sp(sp);
> +}
> +
> +static void qla_nvme_sp_done(void *ptr, int res)
> +{
> +	srb_t *sp = ptr;
> +	struct srb_iocb *nvme;
> +	struct nvmefc_fcp_req *fd;
> +
> +	nvme = &sp->u.iocb_cmd;
> +	fd = nvme->u.nvme.desc;
> +
> +	if (!atomic_dec_and_test(&sp->ref_count))
> +		return;
> +
> +	if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED))
> +		goto rel;
> +
> +	if (unlikely((nvme->u.nvme.comp_status) || res))
> +		fd->status = NVME_SC_FC_TRANSPORT_ABORTED;
> +	else
> +		fd->status = 0;

FCP req failure status should be -Exxx values, not nvme status codes. 
This is clear in the api header.

-- james
Madhani, Himanshu June 20, 2017, 4:31 a.m. UTC | #4
> On Jun 19, 2017, at 2:22 PM, James Smart <james.smart@broadcom.com> wrote:
> 
> On 6/16/2017 3:47 PM, Himanshu Madhani wrote:
>> From: Duane Grigsby <duane.grigsby@cavium.com>
>> 
>> This code provides the interfaces to register remote and local ports
>> of FC4 type 0x28 with the FC-NVMe transport and transports the
>> requests (FC-NVMe FC link services and FC-NVMe commands IUs) to the
>> fabric. It also provides the support for allocating h/w queues and
>> aborting FC-NVMe FC requests.
>> 
>> Signed-off-by: Darren Trapp <darren.trapp@cavium.com>
>> Signed-off-by: Duane Grigsby <duane.grigsby@cavium.com>
>> Signed-off-by: Anil Gurumurthy <anil.gurumurhty@cavium.com>
>> Signed-off-by: Giridhar Malavali <giridhar.malavali@cavium.com>
>> Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
>> 
>> +#if (IS_ENABLED(CONFIG_NVME_FC))
>> +extern int nvme_fc_register_remoteport(struct nvme_fc_local_port *,
>> +    struct nvme_fc_port_info *, struct nvme_fc_remote_port **);
>> +extern int nvme_fc_register_localport(struct nvme_fc_port_info *,
>> +    struct nvme_fc_port_template *, struct device *,
>> +    struct nvme_fc_local_port **);
>> +extern int nvme_fc_unregister_localport(struct nvme_fc_local_port *);
>> +#endif
> 
> There should be no need for these. They are in the include/linux/nvme-fc-driver.h header.
> 

Agree. Will remove it 

> 
>> +static void qla_nvme_sp_ls_done(void *ptr, int res)
>> +{
>> +	srb_t *sp = ptr;
>> +	struct srb_iocb *nvme;
>> +	struct nvmefc_ls_req   *fd;
>> +	struct nvme_private *priv;
>> +
>> +	if (atomic_read(&sp->ref_count) == 0) {
>> +		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
>> +		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
>> +		return;
>> +	}
>> +
>> +	if (!atomic_dec_and_test(&sp->ref_count))
>> +		return;
>> +
>> +	if (res)
>> +		res = NVME_SC_FC_TRANSPORT_ERROR;
> 
> LS request failures status should be -Exxx values, not nvme status codes (which don't apply to LS's).
> The api probably wasn't super clear on this.
> 

Will fix up in v2.

>> +
>> +	nvme = &sp->u.iocb_cmd;
>> +	fd = nvme->u.nvme.desc;
>> +	priv = fd->private;
>> +	priv->comp_status = res;
>> +	schedule_work(&priv->ls_work);
>> +	/* work schedule doesn't need the sp */
>> +	qla2x00_rel_sp(sp);
>> +}
>> +
>> +static void qla_nvme_sp_done(void *ptr, int res)
>> +{
>> +	srb_t *sp = ptr;
>> +	struct srb_iocb *nvme;
>> +	struct nvmefc_fcp_req *fd;
>> +
>> +	nvme = &sp->u.iocb_cmd;
>> +	fd = nvme->u.nvme.desc;
>> +
>> +	if (!atomic_dec_and_test(&sp->ref_count))
>> +		return;
>> +
>> +	if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED))
>> +		goto rel;
>> +
>> +	if (unlikely((nvme->u.nvme.comp_status) || res))
>> +		fd->status = NVME_SC_FC_TRANSPORT_ABORTED;
>> +	else
>> +		fd->status = 0;
> 
> FCP req failures status should be -Exxx values, not nvme status codes. This is clear in the api header.
> 

Sure. Will fix this in v2. 

> -- james
>
diff mbox

Patch

diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 44def6bb4bb0..0b767a0bb308 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,6 +1,6 @@ 
 qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
 		qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
-		qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o
+		qla_nx.o qla_mr.o qla_nx2.o qla_target.o qla_tmpl.o qla_nvme.o
 
 obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
 obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index cf4f47603a91..d840529fc023 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,7 +15,7 @@ 
  * |                              |                    | 0x015b-0x0160	|
  * |                              |                    | 0x016e		|
  * | Mailbox commands             |       0x1199       | 0x1193		|
- * | Device Discovery             |       0x2131       | 0x210e-0x2116  |
+ * | Device Discovery             |       0x2134       | 0x210e-0x2116  |
  * |				  | 		       | 0x211a         |
  * |                              |                    | 0x211c-0x2128  |
  * |                              |                    | 0x212a-0x2130  |
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 693c42392886..c1edfa55b071 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -37,6 +37,7 @@ 
 #include "qla_bsg.h"
 #include "qla_nx.h"
 #include "qla_nx2.h"
+#include "qla_nvme.h"
 #define QLA2XXX_DRIVER_NAME	"qla2xxx"
 #define QLA2XXX_APIDEV		"ql2xapidev"
 #define QLA2XXX_MANUFACTURER	"QLogic Corporation"
@@ -423,6 +424,7 @@  struct srb_iocb {
 			int rsp_len;
 			dma_addr_t cmd_dma;
 			dma_addr_t rsp_dma;
+			enum nvmefc_fcp_datadir dir;
 			uint32_t dl;
 			uint32_t timeout_sec;
 		} nvme;
@@ -452,6 +454,7 @@  struct srb_iocb {
 #define SRB_NACK_PRLI	17
 #define SRB_NACK_LOGO	18
 #define SRB_NVME_CMD	19
+#define SRB_NVME_LS	20
 #define SRB_PRLI_CMD	21
 
 enum {
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 6fbee11c1a18..fc2c03bda731 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -10,6 +10,32 @@ 
 #include <linux/interrupt.h>
 
 /*
+ * Global functions prototype in qla_nvme.c source file.
+ */
+extern void qla_nvme_register_hba(scsi_qla_host_t *);
+extern int  qla_nvme_register_remote(scsi_qla_host_t *, fc_port_t *);
+extern void qla_nvme_delete(scsi_qla_host_t *);
+extern void qla_nvme_abort(struct qla_hw_data *, srb_t *sp);
+extern void qla_nvme_unregister_remote_port(struct work_struct *);
+extern int qla_nvme_wait_on_rport_del(fc_port_t *);
+extern void qla_nvme_abort_all(fc_port_t *);
+extern int qla_nvme_post_cmd(struct nvme_fc_local_port *,
+    struct nvme_fc_remote_port *, void *, struct nvmefc_fcp_req *);
+extern int qla_nvme_alloc_queue(struct nvme_fc_local_port *, unsigned int,
+    u16, void **);
+extern int qla_nvme_hba_scan(scsi_qla_host_t *);
+extern void qla_nvme_ls_abort(struct nvme_fc_local_port *,
+    struct nvme_fc_remote_port *, struct nvmefc_ls_req *);
+extern int qla_nvme_ls_req(struct nvme_fc_local_port *,
+    struct nvme_fc_remote_port *, struct nvmefc_ls_req *);
+extern void qla_nvme_poll(struct nvme_fc_local_port *, void *);
+extern int qla2x00_start_nvme_mq(srb_t *);
+extern void qla_nvme_fcp_abort(struct nvme_fc_local_port *,
+    struct nvme_fc_remote_port *, void *, struct nvmefc_fcp_req *);
+extern void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *, struct pt_ls4_request *,
+    struct req_que *);
+
+/*
  * Global Function Prototypes in qla_init.c source file.
  */
 extern int qla2x00_initialize_adapter(scsi_qla_host_t *);
@@ -141,6 +167,7 @@  extern int ql2xiniexchg;
 extern int ql2xfwholdabts;
 extern int ql2xmvasynctoatio;
 extern int ql2xuctrlirq;
+extern int ql2xnvmeenable;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 8a2586a04961..73125837267b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4513,6 +4513,11 @@  qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 	fcport->deleted = 0;
 	fcport->logout_on_delete = 1;
 
+	if (fcport->fc4f_nvme) {
+		qla_nvme_register_remote(vha, fcport);
+		return;
+	}
+
 	qla2x00_set_fcport_state(fcport, FCS_ONLINE);
 	qla2x00_iidma_fcport(vha, fcport);
 	qla24xx_update_fcport_fcp_prio(vha, fcport);
@@ -4662,6 +4667,9 @@  qla2x00_configure_fabric(scsi_qla_host_t *vha)
 			break;
 	} while (0);
 
+	if ((!vha->nvme_local_port) && (vha->flags.nvme_enabled))
+		qla_nvme_register_hba(vha);
+
 	if (rval)
 		ql_dbg(ql_dbg_disc, vha, 0x2068,
 		    "Configure fabric error exit rval=%d.\n", rval);
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index daa53235a28a..d40fa000615c 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -3155,6 +3155,39 @@  static void qla2x00_send_notify_ack_iocb(srb_t *sp,
 	nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
 }
 
+/*
+ * qla_nvme_ls - Build an FC-NVMe LS pass-through (PT_LS4) request IOCB.
+ *
+ * Fills @cmd_pkt from the LS request previously staged in sp->u.iocb_cmd:
+ * one TX segment (the LS payload) and one RX segment (the response
+ * buffer). All multi-byte IOCB fields are converted to little-endian,
+ * matching the rest of the IOCB build code.
+ */
+static int
+qla_nvme_ls(srb_t *sp, struct pt_ls4_request *cmd_pkt)
+{
+	struct srb_iocb *nvme;
+
+	nvme = &sp->u.iocb_cmd;
+	cmd_pkt->entry_type = PT_LS4_REQUEST;
+	cmd_pkt->entry_count = 1;
+	cmd_pkt->control_flags = cpu_to_le16(CF_LS4_ORIGINATOR << CF_LS4_SHIFT);
+
+	cmd_pkt->timeout = cpu_to_le16(nvme->u.nvme.timeout_sec);
+	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+	/* TX: the LS request payload. */
+	cmd_pkt->tx_dseg_count = cpu_to_le16(1);
+	cmd_pkt->tx_byte_count = cpu_to_le32(nvme->u.nvme.cmd_len);
+	cmd_pkt->dseg0_len = cpu_to_le32(nvme->u.nvme.cmd_len);
+	cmd_pkt->dseg0_address[0] = cpu_to_le32(LSD(nvme->u.nvme.cmd_dma));
+	cmd_pkt->dseg0_address[1] = cpu_to_le32(MSD(nvme->u.nvme.cmd_dma));
+
+	/* RX: the LS response buffer. */
+	cmd_pkt->rx_dseg_count = cpu_to_le16(1);
+	cmd_pkt->rx_byte_count = cpu_to_le32(nvme->u.nvme.rsp_len);
+	cmd_pkt->dseg1_len = cpu_to_le32(nvme->u.nvme.rsp_len);
+	cmd_pkt->dseg1_address[0] = cpu_to_le32(LSD(nvme->u.nvme.rsp_dma));
+	cmd_pkt->dseg1_address[1] = cpu_to_le32(MSD(nvme->u.nvme.rsp_dma));
+
+	return QLA_SUCCESS;
+}
+
 int
 qla2x00_start_sp(srb_t *sp)
 {
@@ -3211,6 +3244,9 @@  qla2x00_start_sp(srb_t *sp)
 	case SRB_FXIOCB_BCMD:
 		qlafx00_fxdisc_iocb(sp, pkt);
 		break;
+	case SRB_NVME_LS:
+		qla_nvme_ls(sp, pkt);
+		break;
 	case SRB_ABT_CMD:
 		IS_QLAFX00(ha) ?
 			qlafx00_abort_iocb(sp, pkt) :
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 0d60fd0da604..9f96bcf18865 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2835,6 +2835,21 @@  qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
 	sp->done(sp, 0);
 }
 
+/* Completion handler for a PT_LS4 (FC-NVMe LS pass-through) IOCB. */
+void qla24xx_nvme_ls4_iocb(scsi_qla_host_t *vha, struct pt_ls4_request *pkt,
+    struct req_que *req)
+{
+	const char func[] = "LS4_IOCB";
+	srb_t *sp;
+
+	/* Map the completion back to its originating SRB. */
+	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
+	if (sp)
+		sp->done(sp, le16_to_cpu(pkt->status));
+}
+
 /**
  * qla24xx_process_response_queue() - Process response queue entries.
  * @ha: SCSI driver HA context
@@ -2908,6 +2923,10 @@  void qla24xx_process_response_queue(struct scsi_qla_host *vha,
 		case CTIO_CRC2:
 			qlt_response_pkt_all_vps(vha, rsp, (response_t *)pkt);
 			break;
+		case PT_LS4_REQUEST:
+			qla24xx_nvme_ls4_iocb(vha, (struct pt_ls4_request *)pkt,
+			    rsp->req);
+			break;
 		case NOTIFY_ACK_TYPE:
 			if (pkt->handle == QLA_TGT_SKIP_HANDLE)
 				qlt_response_pkt_all_vps(vha, rsp,
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 1eac67e8fdfd..ec243cae2c0a 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -560,6 +560,8 @@  qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
 }
 
 #define	EXTENDED_BB_CREDITS	BIT_0
+#define	NVME_ENABLE_FLAG	BIT_3
+
 /*
  * qla2x00_execute_fw
  *     Start adapter firmware.
@@ -601,6 +603,9 @@  qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
 		} else
 			mcp->mb[4] = 0;
 
+		if ((ql2xnvmeenable) && IS_QLA27XX(ha))
+			mcp->mb[4] |= NVME_ENABLE_FLAG;
+
 		if (ha->flags.exlogins_enabled)
 			mcp->mb[4] |= ENABLE_EXTENDED_LOGIN;
 
@@ -941,6 +946,23 @@  qla2x00_get_fw_version(scsi_qla_host_t *vha)
 			ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1191,
 			    "%s: Firmware supports Exchange Offload 0x%x\n",
 			    __func__, ha->fw_attributes_h);
+
+		/* bit 26 of fw_attributes */
+		if ((ha->fw_attributes_h & 0x400) && (ql2xnvmeenable)) {
+			struct init_cb_24xx *icb;
+
+			icb = (struct init_cb_24xx *)ha->init_cb;
+			/*
+			 * fw supports nvme and driver load
+			 * parameter requested nvme
+			 */
+			vha->flags.nvme_enabled = 1;
+			icb->firmware_options_2 &=
+			    cpu_to_le32(~(BIT_0 | BIT_1 | BIT_2 | BIT_3));
+			ha->zio_mode = 0;
+			ha->zio_timer = 0;
+		}
+
 	}
 
 	if (IS_QLA27XX(ha)) {
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
new file mode 100644
index 000000000000..8d0120a8a6b2
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -0,0 +1,797 @@ 
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2017 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#include "qla_nvme.h"
+#include "qla_def.h"
+#include <linux/scatterlist.h>
+#include <linux/delay.h>
+#include <linux/nvme.h>
+#include <linux/nvme-fc.h>
+
+static struct nvme_fc_port_template qla_nvme_fc_transport;
+
+#if (IS_ENABLED(CONFIG_NVME_FC))
+extern int nvme_fc_register_remoteport(struct nvme_fc_local_port *,
+    struct nvme_fc_port_info *, struct nvme_fc_remote_port **);
+extern int nvme_fc_register_localport(struct nvme_fc_port_info *,
+    struct nvme_fc_port_template *, struct device *,
+    struct nvme_fc_local_port **);
+extern int nvme_fc_unregister_localport(struct nvme_fc_local_port *);
+#endif
+
+/*
+ * qla_nvme_register_remote - Register an fcport with the FC-NVMe transport.
+ *
+ * Registers @fcport as an NVMe remote port when host NVMe is enabled and
+ * the port's PRLI service parameters advertise a target or discovery
+ * controller. Returns 0 on success (or when registration is not
+ * applicable), a negative errno or transport error otherwise.
+ */
+int qla_nvme_register_remote(scsi_qla_host_t *vha, fc_port_t *fcport)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct nvme_rport *rport;
+	int ret;
+
+	/* Nothing to do if this port is already known to the transport. */
+	if (fcport->nvme_flag & NVME_FLAG_REGISTERED)
+		return 0;
+
+	if (!vha->flags.nvme_enabled) {
+		ql_log(ql_log_info, vha, 0x2100,
+		    "%s: Not registering target since Host NVME is not enabled\n",
+		    __func__);
+		return 0;
+	}
+
+	/* Only targets and discovery controllers are registered. */
+	if (!(fcport->nvme_prli_service_param &
+	    (NVME_PRLI_SP_TARGET | NVME_PRLI_SP_DISCOVERY)))
+		return 0;
+
+	INIT_WORK(&fcport->nvme_del_work, qla_nvme_unregister_remote_port);
+	rport = kzalloc(sizeof(*rport), GFP_KERNEL);
+	if (!rport) {
+		ql_log(ql_log_warn, vha, 0x2101,
+		    "%s: unable to alloc memory\n", __func__);
+		return -ENOMEM;
+	}
+
+	rport->req.port_name = wwn_to_u64(fcport->port_name);
+	rport->req.node_name = wwn_to_u64(fcport->node_name);
+	rport->req.port_role = 0;
+
+	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
+		rport->req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
+
+	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_TARGET)
+		rport->req.port_role |= FC_PORT_ROLE_NVME_TARGET;
+
+	if (fcport->nvme_prli_service_param & NVME_PRLI_SP_DISCOVERY)
+		rport->req.port_role |= FC_PORT_ROLE_NVME_DISCOVERY;
+
+	rport->req.port_id = fcport->d_id.b24;
+
+	ql_log(ql_log_info, vha, 0x2102,
+	    "%s: traddr=pn-0x%016llx:nn-0x%016llx PortID:%06x\n",
+	    __func__, rport->req.port_name, rport->req.node_name,
+	    rport->req.port_id);
+
+	ret = nvme_fc_register_remoteport(vha->nvme_local_port, &rport->req,
+	    &fcport->nvme_remote_port);
+	if (ret) {
+		ql_log(ql_log_warn, vha, 0x212e,
+		    "Failed to register remote port. Transport returned %d\n",
+		    ret);
+		/* Registration failed -- don't leak the tracking struct. */
+		kfree(rport);
+		return ret;
+	}
+
+	fcport->nvme_remote_port->private = fcport;
+	fcport->nvme_flag |= NVME_FLAG_REGISTERED;
+	atomic_set(&fcport->nvme_ref_count, 1);
+	rport->fcport = fcport;
+	list_add_tail(&rport->list, &vha->nvme_rport_list);
+#endif
+	return 0;
+}
+
+/*
+ * Scan the HBA's fcport list and register every FC-NVMe capable port
+ * that is not yet known to the FC-NVMe transport.
+ * Returns 0 when at least one such port was found, 1 otherwise.
+ */
+int qla_nvme_hba_scan(scsi_qla_host_t *vha)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	uint8_t not_found = 1;
+	fc_port_t *fcport;
+
+	list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (!fcport->fc4f_nvme)
+			continue;
+		if (fcport->nvme_flag & NVME_FLAG_REGISTERED)
+			continue;
+		qla_nvme_register_remote(vha, fcport);
+		not_found = 0;
+	}
+
+	return not_found;
+#else
+	return 1;
+#endif
+}
+
+/* Allocate (or hand back an existing) hardware queue pair for NVMe I/O. */
+int qla_nvme_alloc_queue(struct nvme_fc_local_port *lport, unsigned int qidx,
+    u16 qsize, void **handle)
+{
+	struct scsi_qla_host *vha = lport->private;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_qpair *qpair;
+
+	/* Queue index 0 is remapped to 1. */
+	if (qidx == 0)
+		qidx = 1;
+
+	ql_log(ql_log_info, vha, 0x2104,
+	    "%s: handle %p, idx =%d, qsize %d\n",
+	    __func__, handle, qidx, qsize);
+
+	if (qidx > qla_nvme_fc_transport.max_hw_queues) {
+		ql_log(ql_log_warn, vha, 0x212f,
+		    "%s: Illegal qidx=%d. Max=%d\n",
+		    __func__, qidx, qla_nvme_fc_transport.max_hw_queues);
+		return -EINVAL;
+	}
+
+	/* Reuse a previously created qpair for this index, if present. */
+	qpair = ha->queue_pair_map[qidx];
+	if (qpair) {
+		*handle = qpair;
+		ql_log(ql_log_info, vha, 0x2121,
+		    "Returning existing qpair of %p for idx=%x\n",
+		    qpair, qidx);
+		return 0;
+	}
+
+	ql_log(ql_log_warn, vha, 0xffff,
+	    "allocating q for idx=%x w/o cpu mask\n", qidx);
+	qpair = qla2xxx_create_qpair(vha, 5, vha->vp_idx, true);
+	if (!qpair) {
+		ql_log(ql_log_warn, vha, 0x2122,
+		    "Failed to allocate qpair\n");
+		return -EINVAL;
+	}
+	*handle = qpair;
+
+	return 0;
+}
+
+/*
+ * SRB completion callback for an FC-NVMe LS request (interrupt context).
+ *
+ * Drops this completion's reference on the SRB; on the final drop the
+ * completion status is recorded in the request's private data and the
+ * transport's done() callback is deferred to process context via
+ * priv->ls_work (handled by qla_nvme_ls_complete()).
+ */
+static void qla_nvme_sp_ls_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct srb_iocb *nvme;
+	struct nvmefc_ls_req   *fd;
+	struct nvme_private *priv;
+
+	/* Defensive: a zero refcount here means the SRB was already freed. */
+	if (atomic_read(&sp->ref_count) == 0) {
+		ql_log(ql_log_warn, sp->fcport->vha, 0x2123,
+		    "SP reference-count to ZERO on LS_done -- sp=%p.\n", sp);
+		return;
+	}
+
+	if (!atomic_dec_and_test(&sp->ref_count))
+		return;
+
+	/* Collapse any driver error into a transport-level error code. */
+	if (res)
+		res = NVME_SC_FC_TRANSPORT_ERROR;
+
+	nvme = &sp->u.iocb_cmd;
+	fd = nvme->u.nvme.desc;
+	priv = fd->private;
+	priv->comp_status = res;
+	schedule_work(&priv->ls_work);
+	/* work schedule doesn't need the sp */
+	qla2x00_rel_sp(sp);
+}
+
+/*
+ * SRB completion callback for an FC-NVMe FCP command.
+ *
+ * On the final reference drop, translates the firmware completion into
+ * the transport request's status and invokes fd->done(). If the remote
+ * port is no longer registered the transport callback is skipped and the
+ * SRB is simply returned to its qpair.
+ */
+static void qla_nvme_sp_done(void *ptr, int res)
+{
+	srb_t *sp = ptr;
+	struct srb_iocb *nvme;
+	struct nvmefc_fcp_req *fd;
+
+	nvme = &sp->u.iocb_cmd;
+	fd = nvme->u.nvme.desc;
+
+	if (!atomic_dec_and_test(&sp->ref_count))
+		return;
+
+	if (!(sp->fcport->nvme_flag & NVME_FLAG_REGISTERED))
+		goto rel;
+
+	/* Either a firmware completion error or a driver error fails the I/O. */
+	if (unlikely((nvme->u.nvme.comp_status) || res))
+		fd->status = NVME_SC_FC_TRANSPORT_ABORTED;
+	else
+		fd->status = 0;
+
+	/* Pass the received response payload length back to the transport. */
+	fd->rcv_rsplen = nvme->u.nvme.rsp_pyld_len;
+	(*fd->done)(fd);
+rel:
+	qla2xxx_rel_qpair_sp(sp->qpair, sp);
+}
+
+/* Transport ls_abort hook: abort a pending FC-NVMe LS request. */
+void qla_nvme_ls_abort(struct nvme_fc_local_port *lport,
+    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
+{
+	struct nvme_private *priv = fd->private;
+	fc_port_t *fcport = rport->private;
+	struct qla_hw_data *ha = fcport->vha->hw;
+	srb_t *sp = priv->sp;
+	int rval;
+
+	ql_log(ql_log_info, fcport->vha, 0x212b,
+	    "%s: aborting sp:%p on fcport:%p\n", __func__, sp, fcport);
+
+	rval = ha->isp_ops->abort_command(sp);
+	if (rval != QLA_SUCCESS)
+		ql_log(ql_log_warn, fcport->vha, 0x2125,
+		    "%s: failed to abort LS command for SP:%p rval=%x\n",
+		    __func__, sp, rval);
+}
+
+/* Workqueue handler: deliver a deferred LS completion to the transport. */
+static void qla_nvme_ls_complete(struct work_struct *work)
+{
+	struct nvme_private *priv =
+	    container_of(work, struct nvme_private, ls_work);
+
+	priv->fd->done(priv->fd, priv->comp_status);
+}
+
+/*
+ * qla_nvme_ls_req - Transport ls_req hook: issue an FC-NVMe LS request.
+ *
+ * Maps the LS payload for DMA, stages the request in an SRB and queues a
+ * PT_LS4 IOCB to the firmware. Returns QLA_SUCCESS (0) when the request
+ * was queued, non-zero otherwise.
+ */
+int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
+    struct nvme_fc_remote_port *rport, struct nvmefc_ls_req *fd)
+{
+	fc_port_t *fcport;
+	struct srb_iocb   *nvme;
+	struct scsi_qla_host *vha;
+	int     rval = QLA_FUNCTION_FAILED;
+	struct qla_hw_data *ha;
+	srb_t           *sp;
+	struct nvme_private *priv;
+
+	if (!fd) {
+		ql_log(ql_log_warn, NULL, 0x2135, "NO NVMe LS request\n");
+		return rval;
+	}
+
+	priv = fd->private;
+	fcport = (fc_port_t *)rport->private;
+	if (!fcport) {
+		ql_log(ql_log_warn, NULL, 0x212c,
+		    "%s: No fcport ptr\n", __func__);
+		return rval;
+	}
+
+	if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
+		return rval;
+
+	vha = fcport->vha;
+	ha = vha->hw;
+	/* Alloc SRB structure */
+	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+	if (!sp)
+		return rval;
+
+	sp->type = SRB_NVME_LS;
+	sp->name = "nvme_ls";
+	sp->done = qla_nvme_sp_ls_done;
+	atomic_set(&sp->ref_count, 1);
+	nvme = &sp->u.iocb_cmd;
+	priv->sp = sp;
+	priv->fd = fd;
+	INIT_WORK(&priv->ls_work, qla_nvme_ls_complete);
+	nvme->u.nvme.desc = fd;
+	nvme->u.nvme.dir = 0;
+	nvme->u.nvme.dl = 0;
+	nvme->u.nvme.cmd_len = fd->rqstlen;
+	nvme->u.nvme.rsp_len = fd->rsplen;
+	nvme->u.nvme.rsp_dma = fd->rspdma;
+	nvme->u.nvme.timeout_sec = fd->timeout;
+	nvme->u.nvme.cmd_dma = dma_map_single(&ha->pdev->dev, fd->rqstaddr,
+	    fd->rqstlen, DMA_TO_DEVICE);
+	dma_sync_single_for_device(&ha->pdev->dev, nvme->u.nvme.cmd_dma,
+	    fd->rqstlen, DMA_TO_DEVICE);
+
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS) {
+		/* Queuing failed: drop our reference so the SRB is freed. */
+		ql_log(ql_log_warn, vha, 0x700e,
+		    "qla2x00_start_sp failed = %d\n", rval);
+		atomic_dec(&sp->ref_count);
+	}
+
+	return rval;
+}
+
+/* Transport fcp_abort hook: abort an outstanding FC-NVMe FCP command. */
+void qla_nvme_fcp_abort(struct nvme_fc_local_port *lport,
+    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
+    struct nvmefc_fcp_req *fd)
+{
+	struct nvme_private *priv = fd->private;
+	srb_t *sp = priv->sp;
+	int rval;
+	struct qla_hw_data *ha;
+	fc_port_t *fcport = rport->private;
+
+	ql_log(ql_log_info, fcport->vha, 0x2126,
+	    "%s: aborting sp:%p on fcport:%p\n", __func__, sp, fcport);
+
+	ha = fcport->vha->hw;
+	rval = ha->isp_ops->abort_command(sp);
+	/* abort_command() returns QLA_SUCCESS (0) when the abort was issued. */
+	if (rval != QLA_SUCCESS)
+		ql_log(ql_log_warn, fcport->vha, 0x2127,
+		    "%s: failed to abort command for SP:%p rval=%x\n",
+		    __func__, sp, rval);
+}
+
+/* Transport poll hook: drain the response queue of the given qpair. */
+void qla_nvme_poll(struct nvme_fc_local_port *lport, void *hw_queue_handle)
+{
+	struct qla_qpair *qpair = hw_queue_handle;
+	struct scsi_qla_host *vha = lport->private;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qpair->qp_lock, flags);
+	qla24xx_process_response_queue(vha, qpair->rsp);
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+}
+
+/*
+ * qla2x00_start_nvme_mq - Build and post a Command Type NVME IOCB.
+ *
+ * Places a COMMAND_NVME IOCB (plus any continuation IOCBs needed for the
+ * scatter list) on the SRB's qpair request ring and rings the doorbell.
+ * Returns QLA_SUCCESS when the command was queued, QLA_FUNCTION_FAILED
+ * when no command handle or ring space is available.
+ */
+int qla2x00_start_nvme_mq(srb_t *sp)
+{
+	unsigned long   flags;
+	uint32_t        *clr_ptr;
+	uint32_t        index;
+	uint32_t        handle;
+	struct cmd_nvme *cmd_pkt;
+	uint16_t        cnt, i;
+	uint16_t        req_cnt;
+	uint16_t        tot_dsds;
+	uint16_t	avail_dsds;
+	uint32_t	*cur_dsd;
+	struct req_que *req = NULL;
+	struct scsi_qla_host *vha = sp->fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_qpair *qpair = sp->qpair;
+	struct srb_iocb *nvme = &sp->u.iocb_cmd;
+	struct scatterlist *sgl, *sg;
+	struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
+
+	/* Setup qpair pointers */
+	req = qpair->req;
+	tot_dsds = fd->sg_cnt;
+
+	/* Acquire qpair specific lock */
+	spin_lock_irqsave(&qpair->qp_lock, flags);
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < req->num_outstanding_cmds; index++) {
+		handle++;
+		if (handle == req->num_outstanding_cmds)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+
+	if (index == req->num_outstanding_cmds)
+		goto queuing_error;
+
+	/* Check for room on the request ring. */
+	req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+	if (req->cnt < (req_cnt + 2)) {
+		cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
+		    RD_REG_DWORD_RELAXED(req->req_q_out);
+
+		if (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length - (req->ring_index - cnt);
+
+		if (req->cnt < (req_cnt + 2))
+			goto queuing_error;
+	}
+
+	/* Track admin-queue async-event commands for AEN accounting. */
+	if (unlikely(!fd->sqid)) {
+		struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
+
+		if (cmd->sqe.common.opcode == nvme_admin_async_event) {
+			nvme->u.nvme.aen_op = 1;
+			atomic_inc(&vha->nvme_active_aen_cnt);
+		}
+	}
+
+	/* Build command packet. */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
+	req->cnt -= req_cnt;
+
+	cmd_pkt = (struct cmd_nvme *)req->ring_ptr;
+	cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
+
+	/* Zero out remaining portion of packet. */
+	clr_ptr = (uint32_t *)cmd_pkt + 2;
+	memset(clr_ptr, 0, REQUEST_ENTRY_SIZE - 8);
+
+	cmd_pkt->entry_status = 0;
+
+	/* Update entry type to indicate Command NVME IOCB */
+	cmd_pkt->entry_type = COMMAND_NVME;
+
+	/* No data transfer how do we check buffer len == 0?? */
+	if (fd->io_dir == NVMEFC_FCP_READ) {
+		cmd_pkt->control_flags =
+		    cpu_to_le16(CF_READ_DATA | CF_NVME_ENABLE);
+		vha->qla_stats.input_bytes += fd->payload_length;
+		vha->qla_stats.input_requests++;
+	} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
+		cmd_pkt->control_flags =
+		    cpu_to_le16(CF_WRITE_DATA | CF_NVME_ENABLE);
+		vha->qla_stats.output_bytes += fd->payload_length;
+		vha->qla_stats.output_requests++;
+	} else if (fd->io_dir == 0) {
+		cmd_pkt->control_flags = cpu_to_le16(CF_NVME_ENABLE);
+	}
+
+	/* Set NPORT-ID */
+	cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
+	cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
+	cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
+	cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
+
+	/* NVME RSP IU */
+	cmd_pkt->nvme_rsp_dsd_len = cpu_to_le16(fd->rsplen);
+	cmd_pkt->nvme_rsp_dseg_address[0] = cpu_to_le32(LSD(fd->rspdma));
+	cmd_pkt->nvme_rsp_dseg_address[1] = cpu_to_le32(MSD(fd->rspdma));
+
+	/* NVME CNMD IU */
+	cmd_pkt->nvme_cmnd_dseg_len = cpu_to_le16(fd->cmdlen);
+	cmd_pkt->nvme_cmnd_dseg_address[0] = cpu_to_le32(LSD(fd->cmddma));
+	cmd_pkt->nvme_cmnd_dseg_address[1] = cpu_to_le32(MSD(fd->cmddma));
+
+	cmd_pkt->dseg_count = cpu_to_le16(tot_dsds);
+	cmd_pkt->byte_count = cpu_to_le32(fd->payload_length);
+
+	/* One DSD is available in the Command Type NVME IOCB */
+	avail_dsds = 1;
+	cur_dsd = (uint32_t *)&cmd_pkt->nvme_data_dseg_address[0];
+	sgl = fd->first_sgl;
+
+	/* Load data segments */
+	for_each_sg(sgl, sg, tot_dsds, i) {
+		dma_addr_t      sle_dma;
+		cont_a64_entry_t *cont_pkt;
+
+		/* Allocate additional continuation packets? */
+		if (avail_dsds == 0) {
+			/*
+			 * Five DSDs are available in the Continuation
+			 * Type 1 IOCB.
+			 */
+
+			/* Adjust ring index */
+			req->ring_index++;
+			if (req->ring_index == req->length) {
+				req->ring_index = 0;
+				req->ring_ptr = req->ring;
+			} else {
+				req->ring_ptr++;
+			}
+			cont_pkt = (cont_a64_entry_t *)req->ring_ptr;
+			cont_pkt->entry_type = cpu_to_le32(CONTINUE_A64_TYPE);
+
+			cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
+			avail_dsds = 5;
+		}
+
+		sle_dma = sg_dma_address(sg);
+		*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
+		*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
+		avail_dsds--;
+	}
+
+	/* Set total entry count. */
+	cmd_pkt->entry_count = (uint8_t)req_cnt;
+	wmb();
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else {
+		req->ring_ptr++;
+	}
+
+	/* Set chip new ring index. */
+	WRT_REG_DWORD(req->req_q_in, req->ring_index);
+
+	/* Command queued successfully. */
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+	return QLA_SUCCESS;
+
+queuing_error:
+	spin_unlock_irqrestore(&qpair->qp_lock, flags);
+	return QLA_FUNCTION_FAILED;
+}
+
+/*
+ * qla_nvme_post_cmd - Transport fcp_io hook: queue an FC-NVMe FCP command.
+ *
+ * Wraps the transport request in an SRB on the supplied hardware queue
+ * pair and hands it to qla2x00_start_nvme_mq(). Returns 0 on success or
+ * a negative errno on failure.
+ */
+int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
+    struct nvme_fc_remote_port *rport, void *hw_queue_handle,
+    struct nvmefc_fcp_req *fd)
+{
+	fc_port_t *fcport;
+	struct srb_iocb *nvme;
+	struct scsi_qla_host *vha;
+	int rval = QLA_FUNCTION_FAILED;
+	srb_t *sp;
+	struct qla_qpair *qpair = (struct qla_qpair *)hw_queue_handle;
+	struct nvme_private *priv;
+
+	if (!fd) {
+		ql_log(ql_log_warn, NULL, 0x2134, "NO NVMe FCP request\n");
+		return rval;
+	}
+
+	priv = fd->private;
+	fcport = (fc_port_t *)rport->private;
+	if (!fcport) {
+		ql_log(ql_log_warn, NULL, 0x210e, "No fcport ptr\n");
+		return rval;
+	}
+
+	vha = fcport->vha;
+	/* Reject I/O when the queue or the remote port has gone away. */
+	if ((!qpair) || (!(fcport->nvme_flag & NVME_FLAG_REGISTERED)))
+		return -EBUSY;
+
+	/* Alloc SRB structure */
+	sp = qla2xxx_get_qpair_sp(qpair, fcport, GFP_ATOMIC);
+	if (!sp)
+		return -EIO;
+
+	atomic_set(&sp->ref_count, 1);
+	priv->sp = sp;
+	sp->type = SRB_NVME_CMD;
+	sp->name = "nvme_cmd";
+	sp->done = qla_nvme_sp_done;
+	sp->qpair = qpair;
+	nvme = &sp->u.iocb_cmd;
+
+	nvme->u.nvme.desc = fd;
+
+	rval = qla2x00_start_nvme_mq(sp);
+
+	if (rval != QLA_SUCCESS) {
+		ql_log(ql_log_warn, vha, 0x212d,
+		    "qla2x00_start_nvme_mq failed = %d\n", rval);
+		/* Drop our reference so the SRB goes back to the qpair. */
+		atomic_dec(&sp->ref_count);
+		return -EIO;
+	}
+
+	return rval;
+}
+
+/*
+ * Transport callback invoked once local port unregistration has
+ * completed: clears the cached local port pointer and drops the vha's
+ * NVMe reference (qla_nvme_delete() waits for this count to reach zero).
+ */
+static void qla_nvme_localport_delete(struct nvme_fc_local_port *lport)
+{
+	struct scsi_qla_host *vha = lport->private;
+
+	ql_log(ql_log_info, vha, 0x210f,
+	    "localport delete of %p completed.\n", vha->nvme_local_port);
+
+	vha->nvme_local_port = NULL;
+	atomic_dec(&vha->nvme_ref_count);
+}
+
+/*
+ * Transport callback: remote port teardown completed. Unlink and free
+ * the driver's tracking struct for this fcport's NVMe remote port.
+ */
+static void qla_nvme_remoteport_delete(struct nvme_fc_remote_port *rport)
+{
+	fc_port_t *fcport;
+	struct nvme_rport *r_port, *trport;
+
+	fcport = (fc_port_t *)rport->private;
+	fcport->nvme_remote_port = NULL;
+	fcport->nvme_flag &= ~NVME_FLAG_REGISTERED;
+	atomic_dec(&fcport->nvme_ref_count);
+
+	ql_log(ql_log_info, fcport->vha, 0x2110,
+	    "remoteport_delete of %p completed.\n", fcport);
+
+	list_for_each_entry_safe(r_port, trport,
+	    &fcport->vha->nvme_rport_list, list) {
+		if (r_port->fcport == fcport) {
+			list_del(&r_port->list);
+			/*
+			 * Free only the matched entry. Freeing the iterator
+			 * after the loop would pass a bogus container_of()
+			 * of the list head when no entry matches.
+			 */
+			kfree(r_port);
+			break;
+		}
+	}
+}
+
+/*
+ * Entry points and capability limits advertised to the NVMe-FC transport
+ * when the local port is registered (see qla_nvme_register_hba()).
+ */
+static struct nvme_fc_port_template qla_nvme_fc_transport = {
+	.localport_delete = qla_nvme_localport_delete,
+	.remoteport_delete = qla_nvme_remoteport_delete,
+	.create_queue   = qla_nvme_alloc_queue,
+	.delete_queue 	= NULL,
+	.ls_req		= qla_nvme_ls_req,
+	.ls_abort	= qla_nvme_ls_abort,
+	.fcp_io		= qla_nvme_post_cmd,
+	.fcp_abort	= qla_nvme_fcp_abort,
+	.poll_queue	= qla_nvme_poll,
+	.max_hw_queues  = 8,
+	.max_sgl_segments = 128,
+	.max_dif_sgl_segments = 64,
+	.dma_boundary = 0xFFFFFFFF,
+	.local_priv_sz  = 8,
+	.remote_priv_sz = 0,
+	.lsrqst_priv_sz = sizeof(struct nvme_private),
+	.fcprqst_priv_sz = sizeof(struct nvme_private),
+};
+
+/*
+ * Poll (up to ~2s) for the SRB's reference count to drop to 1, i.e. for
+ * the aborted command to complete. Returns QLA_SUCCESS when it did,
+ * QLA_FUNCTION_FAILED on timeout. The macros below are also used by
+ * qla_nvme_wait_on_rport_del().
+ */
+static int qla_nvme_wait_on_command(srb_t *sp)
+{
+#define NVME_ABORT_POLLING_PERIOD    1000
+#define NVME_ABORT_WAIT_ITER         ((2 * 1000) / (NVME_ABORT_POLLING_PERIOD))
+	unsigned long wait_iter = NVME_ABORT_WAIT_ITER;
+	int ret = QLA_SUCCESS;
+
+	while ((atomic_read(&sp->ref_count) > 1) && wait_iter--)
+		msleep(NVME_ABORT_POLLING_PERIOD);
+
+	if (atomic_read(&sp->ref_count) > 1)
+		ret = QLA_FUNCTION_FAILED;
+
+	return ret;
+}
+
+/*
+ * Poll until the fcport's NVMe reference count drops to zero or the wait
+ * window expires. Returns QLA_SUCCESS, or QLA_FUNCTION_FAILED on timeout.
+ */
+int qla_nvme_wait_on_rport_del(fc_port_t *fcport)
+{
+	unsigned long attempts = NVME_ABORT_WAIT_ITER;
+
+	while (atomic_read(&fcport->nvme_ref_count) && attempts--)
+		msleep(NVME_ABORT_POLLING_PERIOD);
+
+	if (!atomic_read(&fcport->nvme_ref_count))
+		return QLA_SUCCESS;
+
+	ql_log(ql_log_info, fcport->vha, 0x2111,
+	    "timed out waiting for fcport=%p to delete\n", fcport);
+	return QLA_FUNCTION_FAILED;
+}
+
+/*
+ * qla_nvme_abort - Abort an NVMe SRB and wait for it to complete.
+ *
+ * Issues a firmware abort for @sp; when the abort was issued
+ * successfully, polls until the SRB's reference count drops (i.e. the
+ * command completed) and logs if that wait times out.
+ */
+void qla_nvme_abort(struct qla_hw_data *ha, srb_t *sp)
+{
+	int rval;
+
+	rval = ha->isp_ops->abort_command(sp);
+	if (rval == QLA_SUCCESS) {
+		/* Only complain when the wait actually timed out. */
+		if (qla_nvme_wait_on_command(sp) != QLA_SUCCESS)
+			ql_log(ql_log_warn, NULL, 0x2112,
+			    "nvme_wait_on_command timed out waiting on sp=%p\n",
+			    sp);
+	}
+}
+
+/*
+ * Abort every outstanding NVMe command or LS request owned by @fcport.
+ *
+ * Walks all request queues' outstanding-command arrays under the hardware
+ * lock. For each matching SRB an extra reference is taken, the lock is
+ * dropped while the synchronous abort runs (qla_nvme_abort() sleeps),
+ * then the slot is cleared and the SRB completed with an error after the
+ * lock is re-acquired.
+ */
+void qla_nvme_abort_all(fc_port_t *fcport)
+{
+	int que, cnt;
+	unsigned long flags;
+	srb_t *sp;
+	struct qla_hw_data *ha = fcport->vha->hw;
+	struct req_que *req;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (que = 0; que < ha->max_req_queues; que++) {
+		req = ha->req_q_map[que];
+		if (!req)
+			continue;
+		if (!req->outstanding_cmds)
+			continue;
+		/* Handle 0 is unused; scan the remaining slots. */
+		for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
+			sp = req->outstanding_cmds[cnt];
+			if ((sp) && ((sp->type == SRB_NVME_CMD) ||
+			    (sp->type == SRB_NVME_LS)) &&
+				(sp->fcport == fcport)) {
+				atomic_inc(&sp->ref_count);
+				spin_unlock_irqrestore(&ha->hardware_lock,
+				    flags);
+				qla_nvme_abort(ha, sp);
+				spin_lock_irqsave(&ha->hardware_lock, flags);
+				req->outstanding_cmds[cnt] = NULL;
+				sp->done(sp, 1);
+			}
+		}
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
+/* Deferred-work handler: unregister this fcport's NVMe remote port. */
+void qla_nvme_unregister_remote_port(struct work_struct *work)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct fc_port *fcport = container_of(work, struct fc_port,
+	    nvme_del_work);
+	struct nvme_rport *rport, *trport;
+
+	list_for_each_entry_safe(rport, trport,
+	    &fcport->vha->nvme_rport_list, list) {
+		if (rport->fcport != fcport)
+			continue;
+		ql_log(ql_log_info, fcport->vha, 0x2113,
+		    "%s: fcport=%p\n", __func__, fcport);
+		nvme_fc_unregister_remoteport(fcport->nvme_remote_port);
+	}
+#endif
+}
+
+/*
+ * Tear down all FC-NVMe state for @vha: unregister every remote port
+ * (waiting for each teardown and aborting its outstanding I/O), then
+ * unregister the local port and wait for the transport to drop the vha's
+ * NVMe reference (released by qla_nvme_localport_delete()).
+ */
+void qla_nvme_delete(scsi_qla_host_t *vha)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct nvme_rport *rport, *trport;
+	fc_port_t *fcport;
+
+	if (!list_empty(&vha->nvme_rport_list)) {
+		list_for_each_entry_safe(rport, trport,
+		    &vha->nvme_rport_list, list) {
+			fcport = rport->fcport;
+			ql_log(ql_log_info, fcport->vha, 0x2114,
+			    "%s: fcport=%p\n", __func__, fcport);
+			nvme_fc_unregister_remoteport(
+			    fcport->nvme_remote_port);
+			qla_nvme_wait_on_rport_del(fcport);
+			qla_nvme_abort_all(fcport);
+		}
+	}
+
+	if (vha->nvme_local_port) {
+		ql_log(ql_log_info, vha, 0x2115,
+		    "unregister localport = %p\n", vha->nvme_local_port);
+		nvme_fc_unregister_localport(vha->nvme_local_port);
+	}
+
+	/* Busy-wait (yielding) until the localport delete drops the ref. */
+	while (atomic_read(&vha->nvme_ref_count))
+		schedule();
+#endif
+}
+
+/*
+ * qla_nvme_register_hba - Register @vha as an FC-NVMe local port.
+ *
+ * Clamps the advertised hardware queue count to what the HBA provides
+ * and registers the port with the NVMe-FC transport. On success the vha
+ * holds one NVMe reference until the localport delete callback runs.
+ */
+void qla_nvme_register_hba(scsi_qla_host_t *vha)
+{
+#if (IS_ENABLED(CONFIG_NVME_FC))
+	struct nvme_fc_port_template *tmpl;
+	struct qla_hw_data *ha;
+	struct nvme_fc_port_info pinfo;
+	int ret;
+
+	ha = vha->hw;
+	tmpl = &qla_nvme_fc_transport;
+
+	WARN_ON(vha->nvme_local_port);
+	WARN_ON(ha->max_req_queues < 3);
+
+	qla_nvme_fc_transport.max_hw_queues =
+	    min((uint8_t)(qla_nvme_fc_transport.max_hw_queues),
+		(uint8_t)(ha->max_req_queues - 2));
+
+	/* Don't pass uninitialized stack fields to the transport. */
+	memset(&pinfo, 0, sizeof(pinfo));
+	pinfo.node_name = wwn_to_u64(vha->node_name);
+	pinfo.port_name = wwn_to_u64(vha->port_name);
+	pinfo.port_role = FC_PORT_ROLE_NVME_INITIATOR;
+	pinfo.port_id = vha->d_id.b24;
+
+	ql_log(ql_log_info, vha, 0xffff,
+	    "register_localport: host-traddr=pn-0x%llx:nn-0x%llx on portID:%x\n",
+	    pinfo.port_name, pinfo.node_name, pinfo.port_id);
+	qla_nvme_fc_transport.dma_boundary = vha->host->dma_boundary;
+
+	ret = nvme_fc_register_localport(&pinfo, tmpl,
+	    get_device(&ha->pdev->dev), &vha->nvme_local_port);
+	if (ret) {
+		ql_log(ql_log_warn, vha, 0xffff,
+		    "register_localport failed: ret=%x\n", ret);
+		return;
+	}
+	atomic_set(&vha->nvme_ref_count, 1);
+	vha->nvme_local_port->private = vha;
+#endif
+}
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
new file mode 100644
index 000000000000..dfe56f207b28
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -0,0 +1,132 @@ 
+/*
+ * QLogic Fibre Channel HBA Driver
+ * Copyright (c)  2003-2017 QLogic Corporation
+ *
+ * See LICENSE.qla2xxx for copyright and licensing details.
+ */
+#ifndef __QLA_NVME_H
+#define __QLA_NVME_H
+
+#include <linux/blk-mq.h>
+#include <uapi/scsi/fc/fc_fs.h>
+#include <uapi/scsi/fc/fc_els.h>
+#include <linux/nvme-fc-driver.h>
+
+#define NVME_ATIO_CMD_OFF 32
+#define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
+#define Q2T_NVME_NUM_TAGS 2048
+#define QLA_MAX_FC_SEGMENTS 64
+
+struct srb;
+/* Driver-private state embedded in each transport LS/FCP request. */
+struct nvme_private {
+	struct srb	*sp;		/* SRB carrying this request */
+	struct nvmefc_ls_req *fd;	/* LS request (LS path only) */
+	struct work_struct ls_work;	/* defers LS done() to process context */
+	int comp_status;		/* completion status reported via done() */
+};
+
+/* Driver bookkeeping for a registered NVMe remote port. */
+struct nvme_rport {
+	struct nvme_fc_port_info req;	/* info passed to the transport */
+	struct list_head list;		/* entry in vha->nvme_rport_list */
+	struct fc_port *fcport;		/* owning fcport */
+};
+
+#define COMMAND_NVME    0x88            /* Command Type FC-NVMe IOCB */
+/*
+ * Command Type NVME IOCB: request-queue entry describing an FC-NVMe FCP
+ * command (command IU, response IU descriptor and first data segment).
+ * Multi-byte fields are little-endian (built with cpu_to_le*() in
+ * qla2x00_start_nvme_mq()).
+ */
+struct cmd_nvme {
+	uint8_t entry_type;             /* Entry type. */
+	uint8_t entry_count;            /* Entry count. */
+	uint8_t sys_define;             /* System defined. */
+	uint8_t entry_status;           /* Entry Status. */
+
+	uint32_t handle;                /* System handle. */
+	uint16_t nport_handle;          /* N_PORT handle. */
+	uint16_t timeout;               /* Command timeout. */
+
+	uint16_t dseg_count;            /* Data segment count. */
+	uint16_t nvme_rsp_dsd_len;      /* NVMe RSP DSD length */
+
+	uint64_t rsvd;
+
+	uint16_t control_flags;         /* Control Flags */
+#define CF_NVME_ENABLE                  BIT_9
+#define CF_DIF_SEG_DESCR_ENABLE         BIT_3
+#define CF_DATA_SEG_DESCR_ENABLE        BIT_2
+#define CF_READ_DATA                    BIT_1
+#define CF_WRITE_DATA                   BIT_0
+
+	uint16_t nvme_cmnd_dseg_len;             /* Data segment length. */
+	uint32_t nvme_cmnd_dseg_address[2];      /* Data segment address. */
+	uint32_t nvme_rsp_dseg_address[2];       /* Data segment address. */
+
+	uint32_t byte_count;            /* Total byte count. */
+
+	uint8_t port_id[3];             /* PortID of destination port. */
+	uint8_t vp_index;
+
+	uint32_t nvme_data_dseg_address[2];      /* Data segment address. */
+	uint32_t nvme_data_dseg_len;             /* Data segment length. */
+};
+
+#define PT_LS4_REQUEST 0x89	/* Link Service pass-through IOCB (request) */
+/*
+ * PT_LS4 pass-through IOCB: carries one TX segment (LS payload) and one
+ * RX segment (response buffer); built by qla_nvme_ls() and completed via
+ * qla24xx_nvme_ls4_iocb().
+ */
+struct pt_ls4_request {
+	uint8_t entry_type;
+	uint8_t entry_count;
+	uint8_t sys_define;
+	uint8_t entry_status;
+	uint32_t handle;
+	uint16_t status;
+	uint16_t nport_handle;
+	uint16_t tx_dseg_count;
+	uint8_t  vp_index;
+	uint8_t  rsvd;
+	uint16_t timeout;
+	uint16_t control_flags;
+#define CF_LS4_SHIFT		13
+#define CF_LS4_ORIGINATOR	0
+#define CF_LS4_RESPONDER	1
+#define CF_LS4_RESPONDER_TERM	2
+
+	uint16_t rx_dseg_count;
+	uint16_t rsvd2;
+	uint32_t exchange_address;
+	uint32_t rsvd3;
+	uint32_t rx_byte_count;
+	uint32_t tx_byte_count;
+	uint32_t dseg0_address[2];
+	uint32_t dseg0_len;
+	uint32_t dseg1_address[2];
+	uint32_t dseg1_len;
+};
+
+#define PT_LS4_UNSOL 0x56	/* pass-up unsolicited rec FC-NVMe request */
+/*
+ * Pass-up IOCB for an unsolicited FC-NVMe LS frame received from the
+ * fabric; carries the FC frame header fields and the start of the
+ * payload.
+ */
+struct pt_ls4_rx_unsol {
+	uint8_t entry_type;
+	uint8_t entry_count;
+	uint16_t rsvd0;
+	uint16_t rsvd1;
+	uint8_t vp_index;
+	uint8_t rsvd2;
+	uint16_t rsvd3;
+	uint16_t nport_handle;
+	uint16_t frame_size;
+	uint16_t rsvd4;
+	uint32_t exchange_address;
+	uint8_t d_id[3];
+	uint8_t r_ctl;
+	uint8_t s_id[3];
+	uint8_t cs_ctl;
+	uint8_t f_ctl[3];
+	uint8_t type;
+	uint16_t seq_cnt;
+	uint8_t df_ctl;
+	uint8_t seq_id;
+	uint16_t rx_id;
+	uint16_t ox_id;
+	uint32_t param;
+	uint32_t desc0;
+#define PT_LS4_PAYLOAD_OFFSET 0x2c
+#define PT_LS4_FIRST_PACKET_LEN 20
+	uint32_t desc_len;
+	uint32_t payload[3];
+};
+#endif
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 59f2d5d8009f..5a588e285836 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -120,7 +120,11 @@  MODULE_PARM_DESC(ql2xmaxqdepth,
 		"Maximum queue depth to set for each LUN. "
 		"Default is 32.");
 
+#if (IS_ENABLED(CONFIG_NVME_FC))
+int ql2xenabledif;
+#else
 int ql2xenabledif = 2;
+#endif
 module_param(ql2xenabledif, int, S_IRUGO);
 MODULE_PARM_DESC(ql2xenabledif,
 		" Enable T10-CRC-DIF:\n"
@@ -129,6 +133,16 @@  MODULE_PARM_DESC(ql2xenabledif,
 		"  1 -- Enable DIF for all types\n"
 		"  2 -- Enable DIF for all types, except Type 0.\n");
 
+#if (IS_ENABLED(CONFIG_NVME_FC))
+int ql2xnvmeenable = 1;
+#else
+int ql2xnvmeenable;
+#endif
+module_param(ql2xnvmeenable, int, 0644);
+MODULE_PARM_DESC(ql2xnvmeenable,
+    "Enables NVME support. "
+    "0 - no NVMe. Default is Y");
+
 int ql2xenablehba_err_chk = 2;
 module_param(ql2xenablehba_err_chk, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xenablehba_err_chk,
@@ -267,6 +281,7 @@  static void qla2x00_clear_drv_active(struct qla_hw_data *);
 static void qla2x00_free_device(scsi_qla_host_t *);
 static void qla83xx_disable_laser(scsi_qla_host_t *vha);
 static int qla2xxx_map_queues(struct Scsi_Host *shost);
+static void qla2x00_destroy_deferred_work(struct qla_hw_data *);
 
 struct scsi_host_template qla2xxx_driver_template = {
 	.module			= THIS_MODULE,
@@ -695,7 +710,7 @@  qla2x00_sp_free_dma(void *ptr)
 	}
 
 end:
-	if (sp->type != SRB_NVME_CMD) {
+	if ((sp->type != SRB_NVME_CMD) && (sp->type != SRB_NVME_LS)) {
 		CMD_SP(cmd) = NULL;
 		qla2x00_rel_sp(sp);
 	}
@@ -1700,15 +1715,23 @@  qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
 			if (sp) {
 				req->outstanding_cmds[cnt] = NULL;
 				if (sp->cmd_type == TYPE_SRB) {
-					/*
-					 * Don't abort commands in adapter
-					 * during EEH recovery as it's not
-					 * accessible/responding.
-					 */
-					if (GET_CMD_SP(sp) &&
+					if ((sp->type == SRB_NVME_CMD) ||
+					    (sp->type == SRB_NVME_LS)) {
+						sp_get(sp);
+						spin_unlock_irqrestore(
+						    &ha->hardware_lock, flags);
+						qla_nvme_abort(ha, sp);
+						spin_lock_irqsave(
+						    &ha->hardware_lock, flags);
+					} else if (GET_CMD_SP(sp) &&
 					    !ha->flags.eeh_busy &&
 					    (sp->type == SRB_SCSI_CMD)) {
 						/*
+						 * Don't abort commands in
+						 * adapter during EEH
+						 * recovery as it's not
+						 * accessible/responding.
+						 *
 						 * Get a reference to the sp
 						 * and drop the lock. The
 						 * reference ensures this
@@ -3534,6 +3557,9 @@  qla2x00_remove_one(struct pci_dev *pdev)
 		return;
 
 	set_bit(UNLOADING, &base_vha->dpc_flags);
+
+	qla_nvme_delete(base_vha);
+
 	dma_free_coherent(&ha->pdev->dev,
 		base_vha->gnl.size, base_vha->gnl.l, base_vha->gnl.ldma);