@@ -785,6 +785,18 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
case MPI2_FUNCTION_NVME_ENCAPSULATED:
{
nvme_encap_request = (Mpi26NVMeEncapsulatedRequest_t *)request;
+ if (!ioc->pcie_sg_lookup) {
+ dtmprintk(ioc, ioc_info(ioc,
+ "HBA doesn't supports NVMe. Hence rejecting NVMe Encapsulated request.\n"
+ ));
+
+ if (ioc->logging_level & MPT_DEBUG_TM)
+ _debug_dump_mf(nvme_encap_request,
+ ioc->request_sz/4);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
/*
* Get the Physical Address of the sense buffer.
* Use Error Response buffer address field to hold the sense
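For context on the crash path guarded above: _base_build_nvme_prp() fetches the PRP page for a request through the pcie_sg_lookup table, which the driver only allocates for NVMe-capable HBAs. A minimal sketch of that lookup (paraphrased from the helper in mpt3sas_base.c; treat the exact body as an assumption):

void *mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
{
	/* When the HBA has no NVMe support, ioc->pcie_sg_lookup is never
	 * allocated, so this array dereference is a NULL-pointer access --
	 * the page fault seen in the trace below.
	 */
	return ioc->pcie_sg_lookup[smid - 1].pcie_sgl;
}

Checking ioc->pcie_sg_lookup up front, as the hunk does, fails the request with -EINVAL before that dereference can happen.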
If a faulty application issues an NVMe Encapsulated command to an HBA that doesn't support the NVMe protocol, the driver should fail the command as invalid with the message below:

"HBA doesn't support NVMe. Rejecting NVMe Encapsulated request."

Otherwise the page fault kernel panic below is observed while building the PRPs, as no PRP pools are allocated for HBAs that don't support NVMe drives.

RIP: 0010:_base_build_nvme_prp+0x3b/0xf0 [mpt3sas]
Call Trace:
 _ctl_do_mpt_command+0x931/0x1120 [mpt3sas]
 _ctl_ioctl_main.isra.11+0xa28/0x11e0 [mpt3sas]
 ? prepare_to_wait+0xb0/0xb0
 ? tty_ldisc_deref+0x16/0x20
 _ctl_ioctl+0x1a/0x20 [mpt3sas]
 do_vfs_ioctl+0xaa/0x620
 ? vfs_read+0x117/0x140
 ksys_ioctl+0x67/0x90
 __x64_sys_ioctl+0x1a/0x20
 do_syscall_64+0x60/0x190
 entry_SYSCALL_64_after_hwframe+0x44/0xa9

Signed-off-by: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
---
 drivers/scsi/mpt3sas/mpt3sas_ctl.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)
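The "faulty application" path in the trace is the mpt3sas pass-through ioctl. A hypothetical user-space trigger is sketched below; it assumes the driver's mpt3sas_ctl.h and MPI 2.6 headers have been copied locally, that the control node is /dev/mpt3ctl, and that the MPI request frame is carried in the bytes starting at the mf field, as in those headers. None of this is part of the patch itself:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include "mpt3sas_ctl.h"   /* MPT3COMMAND, struct mpt3_ioctl_command */
#include "mpi/mpi2.h"      /* MPI2_FUNCTION_NVME_ENCAPSULATED */
#include "mpi/mpi2_ioc.h"  /* Mpi26NVMeEncapsulatedRequest_t */

int main(void)
{
	/* Union keeps the ioctl struct aligned; the MPI frame lives at mf. */
	union {
		struct mpt3_ioctl_command cmd;
		unsigned char bytes[sizeof(struct mpt3_ioctl_command) +
				    sizeof(Mpi26NVMeEncapsulatedRequest_t)];
	} karg;
	Mpi26NVMeEncapsulatedRequest_t *req =
		(Mpi26NVMeEncapsulatedRequest_t *)karg.cmd.mf;
	int fd = open("/dev/mpt3ctl", O_RDWR);

	if (fd < 0) {
		perror("open /dev/mpt3ctl");
		return 1;
	}

	memset(&karg, 0, sizeof(karg));
	karg.cmd.hdr.ioc_number = 0;           /* first mpt3sas HBA */
	karg.cmd.hdr.max_data_size = sizeof(karg);
	karg.cmd.timeout = 10;
	req->Function = MPI2_FUNCTION_NVME_ENCAPSULATED;

	/* On a SAS-only HBA the patched driver now fails this with EINVAL
	 * instead of faulting in _base_build_nvme_prp().
	 */
	if (ioctl(fd, MPT3COMMAND, &karg) < 0)
		perror("MPT3COMMAND");

	close(fd);
	return 0;
}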