diff mbox series

[10/10] nvme: add separate handling for user integrity buffer

Message ID 20240425183943.6319-11-joshi.k@samsung.com (mailing list archive)
State New, archived
Headers show
Series [01/10] block: set bip_vcnt correctly | expand

Commit Message

Kanchan Joshi April 25, 2024, 6:39 p.m. UTC
For user provided integrity buffer, convert bip flags
(guard/reftag/apptag checks) to protocol specific flags.
Also pass apptag and reftag down.

Signed-off-by: Kanchan Joshi <joshi.k@samsung.com>
---
 drivers/nvme/host/core.c | 36 +++++++++++++++++++++++++++++++++++-
 1 file changed, 35 insertions(+), 1 deletion(-)

Comments

Keith Busch April 25, 2024, 7:56 p.m. UTC | #1
On Fri, Apr 26, 2024 at 12:09:43AM +0530, Kanchan Joshi wrote:
> @@ -983,6 +1009,14 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
>  			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
>  				return BLK_STS_NOTSUPP;
>  			control |= NVME_RW_PRINFO_PRACT;
> +		} else {
> +			/* process user-created integrity */
> +			if (bio_integrity(req->bio)->bip_flags &
> +					BIP_INTEGRITY_USER) {

Make this an "else if" instead of nesting it an extra level.

> +				nvme_setup_user_integrity(ns, req, cmnd,
> +							  &control);
> +				goto out;
> +			}

And this can be structured a little differently so that you don't need
the "goto"; IMO, goto is good for error unwinding, but using it in a
good path harms readability.

This is getting complex enough that splitting it off in a helper
function, maybe nvme_setup_rw_meta(), might be a good idea.
kernel test robot April 26, 2024, 10:57 a.m. UTC | #2
Hi Kanchan,

kernel test robot noticed the following build errors:

[auto build test ERROR on 24c3fc5c75c5b9d471783b4a4958748243828613]

url:    https://github.com/intel-lab-lkp/linux/commits/Kanchan-Joshi/block-set-bip_vcnt-correctly/20240426-024916
base:   24c3fc5c75c5b9d471783b4a4958748243828613
patch link:    https://lore.kernel.org/r/20240425183943.6319-11-joshi.k%40samsung.com
patch subject: [PATCH 10/10] nvme: add separate handling for user integrity buffer
config: x86_64-kexec (https://download.01.org/0day-ci/archive/20240426/202404261859.n3J0awuF-lkp@intel.com/config)
compiler: clang version 18.1.4 (https://github.com/llvm/llvm-project e6c3289804a67ea0bb6a86fadbe454dd93b8d855)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240426/202404261859.n3J0awuF-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202404261859.n3J0awuF-lkp@intel.com/

All errors (new ones prefixed by >>):

>> drivers/nvme/host/core.c:1014:31: error: member reference base type 'void' is not a structure or union
    1014 |                         if (bio_integrity(req->bio)->bip_flags &
         |                             ~~~~~~~~~~~~~~~~~~~~~~~^ ~~~~~~~~~
   1 error generated.


vim +/void +1014 drivers/nvme/host/core.c

   971	
   972	static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
   973			struct request *req, struct nvme_command *cmnd,
   974			enum nvme_opcode op)
   975	{
   976		u16 control = 0;
   977		u32 dsmgmt = 0;
   978	
   979		if (req->cmd_flags & REQ_FUA)
   980			control |= NVME_RW_FUA;
   981		if (req->cmd_flags & (REQ_FAILFAST_DEV | REQ_RAHEAD))
   982			control |= NVME_RW_LR;
   983	
   984		if (req->cmd_flags & REQ_RAHEAD)
   985			dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
   986	
   987		cmnd->rw.opcode = op;
   988		cmnd->rw.flags = 0;
   989		cmnd->rw.nsid = cpu_to_le32(ns->head->ns_id);
   990		cmnd->rw.cdw2 = 0;
   991		cmnd->rw.cdw3 = 0;
   992		cmnd->rw.metadata = 0;
   993		cmnd->rw.slba =
   994			cpu_to_le64(nvme_sect_to_lba(ns->head, blk_rq_pos(req)));
   995		cmnd->rw.length =
   996			cpu_to_le16((blk_rq_bytes(req) >> ns->head->lba_shift) - 1);
   997		cmnd->rw.reftag = 0;
   998		cmnd->rw.apptag = 0;
   999		cmnd->rw.appmask = 0;
  1000	
  1001		if (ns->head->ms) {
  1002			/*
  1003			 * If formated with metadata, the block layer always provides a
  1004			 * metadata buffer if CONFIG_BLK_DEV_INTEGRITY is enabled.  Else
  1005			 * we enable the PRACT bit for protection information or set the
  1006			 * namespace capacity to zero to prevent any I/O.
  1007			 */
  1008			if (!blk_integrity_rq(req)) {
  1009				if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
  1010					return BLK_STS_NOTSUPP;
  1011				control |= NVME_RW_PRINFO_PRACT;
  1012			} else {
  1013				/* process user-created integrity */
> 1014				if (bio_integrity(req->bio)->bip_flags &
  1015						BIP_INTEGRITY_USER) {
  1016					nvme_setup_user_integrity(ns, req, cmnd,
  1017								  &control);
  1018					goto out;
  1019				}
  1020			}
  1021	
  1022			switch (ns->head->pi_type) {
  1023			case NVME_NS_DPS_PI_TYPE3:
  1024				control |= NVME_RW_PRINFO_PRCHK_GUARD;
  1025				break;
  1026			case NVME_NS_DPS_PI_TYPE1:
  1027			case NVME_NS_DPS_PI_TYPE2:
  1028				control |= NVME_RW_PRINFO_PRCHK_GUARD |
  1029						NVME_RW_PRINFO_PRCHK_REF;
  1030				if (op == nvme_cmd_zone_append)
  1031					control |= NVME_RW_APPEND_PIREMAP;
  1032				nvme_set_ref_tag(ns, cmnd, req);
  1033				break;
  1034			}
  1035		}
  1036	out:
  1037		cmnd->rw.control = cpu_to_le16(control);
  1038		cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
  1039		return 0;
  1040	}
  1041
diff mbox series

Patch

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 27281a9a8951..3b719be4eedb 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -886,6 +886,13 @@  static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 	return BLK_STS_OK;
 }
 
+static void nvme_set_app_tag(struct nvme_command *cmnd, u16 apptag)
+{
+	cmnd->rw.apptag = cpu_to_le16(apptag);
+	/* use 0xffff as mask so that apptag is used in entirety */
+	cmnd->rw.appmask = cpu_to_le16(0xffff);
+}
+
 static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
 			      struct request *req)
 {
@@ -943,6 +950,25 @@  static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
 	return BLK_STS_OK;
 }
 
+static inline void nvme_setup_user_integrity(struct nvme_ns *ns,
+		struct request *req, struct nvme_command *cmnd,
+		u16 *control)
+{
+	struct bio_integrity_payload *bip = bio_integrity(req->bio);
+	unsigned short bip_flags = bip->bip_flags;
+
+	if (bip_flags & BIP_USER_CHK_GUARD)
+		*control |= NVME_RW_PRINFO_PRCHK_GUARD;
+	if (bip_flags & BIP_USER_CHK_REFTAG) {
+		*control |= NVME_RW_PRINFO_PRCHK_REF;
+		nvme_set_ref_tag(ns, cmnd, req);
+	}
+	if (bip_flags & BIP_USER_CHK_APPTAG) {
+		*control |= NVME_RW_PRINFO_PRCHK_APP;
+		nvme_set_app_tag(cmnd, bip->apptag);
+	}
+}
+
 static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 		struct request *req, struct nvme_command *cmnd,
 		enum nvme_opcode op)
@@ -983,6 +1009,14 @@  static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 			if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
 				return BLK_STS_NOTSUPP;
 			control |= NVME_RW_PRINFO_PRACT;
+		} else {
+			/* process user-created integrity */
+			if (bio_integrity(req->bio)->bip_flags &
+					BIP_INTEGRITY_USER) {
+				nvme_setup_user_integrity(ns, req, cmnd,
+							  &control);
+				goto out;
+			}
 		}
 
 		switch (ns->head->pi_type) {
@@ -999,7 +1033,7 @@  static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
 			break;
 		}
 	}
-
+out:
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 	return 0;