
[5/5] nbd: Fix signal handling

Message ID 20240510202313.25209-6-bvanassche@acm.org (mailing list archive)
State New
Series Five nbd patches

Commit Message

Bart Van Assche May 10, 2024, 8:23 p.m. UTC
Both nbd_send_cmd() and nbd_handle_cmd() return either a negative error
number or a positive blk_status_t value. nbd_queue_rq() converts these
return values into a blk_status_t value. There is a bug in the conversion
code: if nbd_send_cmd() returns BLK_STS_RESOURCE, nbd_queue_rq() returns
BLK_STS_OK instead of BLK_STS_RESOURCE. Fix this, move the conversion code
into nbd_handle_cmd(), and fix the remaining sparse warnings.

This patch fixes the following sparse warnings:

drivers/block/nbd.c:673:32: warning: incorrect type in return expression (different base types)
drivers/block/nbd.c:673:32:    expected int
drivers/block/nbd.c:673:32:    got restricted blk_status_t [usertype]
drivers/block/nbd.c:714:48: warning: incorrect type in return expression (different base types)
drivers/block/nbd.c:714:48:    expected int
drivers/block/nbd.c:714:48:    got restricted blk_status_t [usertype]
drivers/block/nbd.c:1120:21: warning: incorrect type in assignment (different base types)
drivers/block/nbd.c:1120:21:    expected int [assigned] ret
drivers/block/nbd.c:1120:21:    got restricted blk_status_t [usertype]
drivers/block/nbd.c:1125:16: warning: incorrect type in return expression (different base types)
drivers/block/nbd.c:1125:16:    expected restricted blk_status_t
drivers/block/nbd.c:1125:16:    got int [assigned] ret

Cc: Christoph Hellwig <hch@lst.de>
Cc: Josef Bacik <jbacik@fb.com>
Cc: Yu Kuai <yukuai3@huawei.com>
Cc: Markus Pargmann <mpa@pengutronix.de>
Fixes: fc17b6534eb8 ("blk-mq: switch ->queue_rq return value to blk_status_t")
Cc: stable@vger.kernel.org
Signed-off-by: Bart Van Assche <bvanassche@acm.org>
---
 drivers/block/nbd.c | 28 ++++++++++++++--------------
 1 file changed, 14 insertions(+), 14 deletions(-)
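
blk_status_t is a __bitwise type (declared in include/linux/blk_types.h), so
sparse treats it as a distinct base type from plain int and flags any implicit
mixing of the two; that is where the warnings quoted above come from. Below is
a small standalone sketch of that mechanism, not nbd code: the status_t
typedef, the send_cmd_*() helpers, the STS_* names and the value 9 are made up
for illustration. It compiles as ordinary C, while a sparse run (e.g.
"sparse sketch.c", or "make C=2 drivers/block/nbd.o" for the real driver)
warns about the int-returning variant only.

#ifdef __CHECKER__			/* defined when sparse runs */
#define __bitwise	__attribute__((bitwise))
#define __force		__attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned char __bitwise status_t;	/* stands in for blk_status_t */

#define STS_OK		((__force status_t)0)
#define STS_RESOURCE	((__force status_t)9)	/* illustrative value */

/* Mixing the types: "incorrect type in return expression" under sparse. */
static int send_cmd_mixed(int busy)
{
	if (busy)
		return STS_RESOURCE;
	return 0;
}

/* Keeping the dedicated type end to end: no warning, no __force cast. */
static status_t send_cmd_typed(int busy)
{
	return busy ? STS_RESOURCE : STS_OK;
}

int main(void)
{
	return send_cmd_mixed(0) + (send_cmd_typed(1) == STS_RESOURCE ? 0 : 1);
}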

Comments

Christoph Hellwig May 20, 2024, 12:41 p.m. UTC | #1
On Fri, May 10, 2024 at 01:23:13PM -0700, Bart Van Assche wrote:
> Both nbd_send_cmd() and nbd_handle_cmd() return either a negative error
> number or a positive blk_status_t value.

Eww.  Please split these into separate values instead.  There is a reason
why blk_status_t is a separate type with sparse checks, and drivers
really shouldn't do away with that for a tiny micro-optimization of
the calling convention (if this even is one and not just the driver
being sloppy).
Bart Van Assche May 20, 2024, 5 p.m. UTC | #2
On 5/20/24 05:41, Christoph Hellwig wrote:
> On Fri, May 10, 2024 at 01:23:13PM -0700, Bart Van Assche wrote:
>> Both nbd_send_cmd() and nbd_handle_cmd() return either a negative error
>> number or a positive blk_status_t value.
> 
> Eww.  Please split these into separate values instead.  There is a reason
> why blk_status_t is a separate type with sparse checks, and drivers
> really shouldn't do away with that for a tiny micro-optimization of
> the calling convention (if this even is one and not just the driver
> being sloppy).

How about the (untested) patch below?

Thanks,

Bart.

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 22a79a62cc4e..4ee76c39e3a5 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -588,11 +588,17 @@ static inline int was_interrupted(int result)
  	return result == -ERESTARTSYS || result == -EINTR;
  }

+struct send_res {
+	int result;
+	blk_status_t status;
+};
+
  /*
   * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
   * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
   */
-static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
+static struct send_res nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
+				    int index)
  {
  	struct request *req = blk_mq_rq_from_pdu(cmd);
  	struct nbd_config *config = nbd->config;
@@ -614,13 +620,13 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)

  	type = req_to_nbd_cmd_type(req);
  	if (type == U32_MAX)
-		return -EIO;
+		return (struct send_res){ .result = -EIO };

  	if (rq_data_dir(req) == WRITE &&
  	    (config->flags & NBD_FLAG_READ_ONLY)) {
  		dev_err_ratelimited(disk_to_dev(nbd->disk),
  				    "Write on read-only\n");
-		return -EIO;
+		return (struct send_res){ .result = -EIO };
  	}

  	if (req->cmd_flags & REQ_FUA)
@@ -674,11 +680,11 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
  				nsock->sent = sent;
  			}
  			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
-			return (__force int)BLK_STS_RESOURCE;
+			return (struct send_res){ .status = BLK_STS_RESOURCE };
  		}
  		dev_err_ratelimited(disk_to_dev(nbd->disk),
  			"Send control failed (result %d)\n", result);
-		return -EAGAIN;
+		return (struct send_res){ .result = -EAGAIN };
  	}
  send_pages:
  	if (type != NBD_CMD_WRITE)
@@ -715,12 +721,14 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
  					nsock->pending = req;
  					nsock->sent = sent;
  					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
-					return (__force int)BLK_STS_RESOURCE;
+					return (struct send_res){
+						.status = BLK_STS_RESOURCE
+					};
  				}
  				dev_err(disk_to_dev(nbd->disk),
  					"Send data failed (result %d)\n",
  					result);
-				return -EAGAIN;
+				return (struct send_res){ .result = -EAGAIN };
  			}
  			/*
  			 * The completion might already have come in,
@@ -737,7 +745,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
  	trace_nbd_payload_sent(req, handle);
  	nsock->pending = NULL;
  	nsock->sent = 0;
-	return 0;
+	return (struct send_res){};
  }

  static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
@@ -1018,7 +1026,8 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
  	struct nbd_device *nbd = cmd->nbd;
  	struct nbd_config *config;
  	struct nbd_sock *nsock;
-	int ret;
+	struct send_res send_res;
+	blk_status_t ret;

  	lockdep_assert_held(&cmd->lock);

@@ -1076,14 +1085,15 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
  	 * Some failures are related to the link going down, so anything that
  	 * returns EAGAIN can be retried on a different socket.
  	 */
-	ret = nbd_send_cmd(nbd, cmd, index);
-	/*
-	 * Access to this flag is protected by cmd->lock, thus it's safe to set
-	 * the flag after nbd_send_cmd() succeed to send request to server.
-	 */
-	if (!ret)
+	send_res = nbd_send_cmd(nbd, cmd, index);
+	ret = send_res.result < 0 ? BLK_STS_IOERR : send_res.status;
+	if (ret == BLK_STS_OK) {
+		/*
+		 * cmd->lock is held. Hence, it's safe to set this flag after
+		 * nbd_send_cmd() succeeded sending the request to the server.
+		 */
  		__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
-	else if (ret == -EAGAIN) {
+	} else if (send_res.result == -EAGAIN) {
  		dev_err_ratelimited(disk_to_dev(nbd->disk),
  				    "Request send failed, requeueing\n");
  		nbd_mark_nsock_dead(nbd, nsock, 1);
@@ -1093,7 +1103,7 @@ static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
  out:
  	mutex_unlock(&nsock->tx_lock);
  	nbd_config_put(nbd);
-	return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
+	return ret;
  }

  static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
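
The struct send_res in the draft above replaces the overloaded int return with
two explicit channels, a negative errno-style result and a blk_status_t, so
neither needs a __force cast and nbd_handle_cmd() can inspect each one
separately. A standalone sketch of that calling convention, not kernel code
(try_send(), the STS_* names, the value 9 and the -5 errno are made up for
illustration):

#include <stdio.h>

struct send_res {
	int result;		/* 0 or a negative errno-style value */
	unsigned int status;	/* stands in for blk_status_t */
};

#define STS_OK		0u
#define STS_RESOURCE	9u	/* illustrative value */

static struct send_res try_send(int link_up, int busy)
{
	if (!link_up)
		return (struct send_res){ .result = -5 };	/* e.g. an I/O error */
	if (busy)
		return (struct send_res){ .status = STS_RESOURCE };
	return (struct send_res){ 0 };	/* success: both members zero */
}

int main(void)
{
	struct send_res r = try_send(1, 1);

	/* Check the channels one at a time, as the draft's nbd_handle_cmd()
	 * does: a negative result is treated as an error, otherwise the
	 * status member is propagated as-is. */
	if (r.result < 0)
		printf("errno-style failure: %d\n", r.result);
	else
		printf("status: %u\n", r.status);
	return 0;
}

With designated initializers the member that is not mentioned is
zero-initialized, so success is simply the all-zero struct, and a two-word
struct returned by value typically fits in registers on common 64-bit ABIs,
so the split should not cost more than the old packed int.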

Patch

diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 29e43ab1650c..22a79a62cc4e 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -588,6 +588,10 @@  static inline int was_interrupted(int result)
 	return result == -ERESTARTSYS || result == -EINTR;
 }
 
+/*
+ * Returns BLK_STS_RESOURCE if the caller should retry after a delay. Returns
+ * -EAGAIN if the caller should requeue @cmd. Returns -EIO if sending failed.
+ */
 static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
@@ -670,7 +674,7 @@  static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 				nsock->sent = sent;
 			}
 			set_bit(NBD_CMD_REQUEUED, &cmd->flags);
-			return BLK_STS_RESOURCE;
+			return (__force int)BLK_STS_RESOURCE;
 		}
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
 			"Send control failed (result %d)\n", result);
@@ -711,7 +715,7 @@  static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
 					nsock->pending = req;
 					nsock->sent = sent;
 					set_bit(NBD_CMD_REQUEUED, &cmd->flags);
-					return BLK_STS_RESOURCE;
+					return (__force int)BLK_STS_RESOURCE;
 				}
 				dev_err(disk_to_dev(nbd->disk),
 					"Send data failed (result %d)\n",
@@ -1008,7 +1012,7 @@  static int wait_for_reconnect(struct nbd_device *nbd)
 	return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
 }
 
-static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
+static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 {
 	struct request *req = blk_mq_rq_from_pdu(cmd);
 	struct nbd_device *nbd = cmd->nbd;
@@ -1022,14 +1026,14 @@  static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 	if (!config) {
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
 				    "Socks array is empty\n");
-		return -EINVAL;
+		return BLK_STS_IOERR;
 	}
 
 	if (index >= config->num_connections) {
 		dev_err_ratelimited(disk_to_dev(nbd->disk),
 				    "Attempted send on invalid socket\n");
 		nbd_config_put(nbd);
-		return -EINVAL;
+		return BLK_STS_IOERR;
 	}
 	cmd->status = BLK_STS_OK;
 again:
@@ -1052,7 +1056,7 @@  static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 			 */
 			sock_shutdown(nbd);
 			nbd_config_put(nbd);
-			return -EIO;
+			return BLK_STS_IOERR;
 		}
 		goto again;
 	}
@@ -1065,7 +1069,7 @@  static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 	blk_mq_start_request(req);
 	if (unlikely(nsock->pending && nsock->pending != req)) {
 		nbd_requeue_cmd(cmd);
-		ret = 0;
+		ret = BLK_STS_OK;
 		goto out;
 	}
 	/*
@@ -1084,19 +1088,19 @@  static int nbd_handle_cmd(struct nbd_cmd *cmd, int index)
 				    "Request send failed, requeueing\n");
 		nbd_mark_nsock_dead(nbd, nsock, 1);
 		nbd_requeue_cmd(cmd);
-		ret = 0;
+		ret = BLK_STS_OK;
 	}
 out:
 	mutex_unlock(&nsock->tx_lock);
 	nbd_config_put(nbd);
-	return ret;
+	return ret < 0 ? BLK_STS_IOERR : (__force blk_status_t)ret;
 }
 
 static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 			const struct blk_mq_queue_data *bd)
 {
 	struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
-	int ret;
+	blk_status_t ret;
 
 	/*
 	 * Since we look at the bio's to send the request over the network we
@@ -1116,10 +1120,6 @@  static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
 	 * appropriate.
 	 */
 	ret = nbd_handle_cmd(cmd, hctx->queue_num);
-	if (ret < 0)
-		ret = BLK_STS_IOERR;
-	else if (!ret)
-		ret = BLK_STS_OK;
 	mutex_unlock(&cmd->lock);
 
 	return ret;