diff mbox series

[v1,07/11] xprtrdma: Remove rpcrdma_ia::ri_flags

Message ID 20200221220044.2072.37658.stgit@manet.1015granger.net (mailing list archive)
State New, archived
Headers show
Series NFS/RDMA client side connection overhaul | expand

Commit Message

Chuck Lever Feb. 21, 2020, 10 p.m. UTC
Clean up:
The upper layer serializes calls to xprt_rdma_close(), so there is no
need for an atomic bit operation. Removing the ri_flags field saves
8 bytes in struct rpcrdma_ia.

This also enables merging rpcrdma_ia_remove() directly into the
disconnect logic.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 net/sunrpc/xprtrdma/transport.c |   15 -----------
 net/sunrpc/xprtrdma/verbs.c     |   55 +++++++++------------------------------
 net/sunrpc/xprtrdma/xprt_rdma.h |   10 -------
 3 files changed, 13 insertions(+), 67 deletions(-)
diff mbox series

Patch

diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 745dfd149637..d7b7dab0aeb6 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -385,26 +385,11 @@ 
 void xprt_rdma_close(struct rpc_xprt *xprt)
 {
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
-	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
-	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
-
-	might_sleep();
 
 	trace_xprtrdma_op_close(r_xprt);
 
-	/* Prevent marshaling and sending of new requests */
-	xprt_clear_connected(xprt);
-
-	if (test_and_clear_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags)) {
-		rpcrdma_ia_remove(ia);
-		goto out;
-	}
-
-	if (ep->rep_connected == -ENODEV)
-		return;
 	rpcrdma_xprt_disconnect(r_xprt);
 
-out:
 	xprt->reestablish_timeout = 0;
 	++xprt->connect_cookie;
 	xprt_disconnect_done(xprt);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 3df20f355579..a7f46bbbf017 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -250,12 +250,11 @@  static void rpcrdma_update_cm_private(struct rpcrdma_xprt *r_xprt,
 			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
 #endif
 		init_completion(&ia->ri_remove_done);
-		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
 		ep->rep_connected = -ENODEV;
 		xprt_force_disconnect(xprt);
 		wait_for_completion(&ia->ri_remove_done);
+		trace_xprtrdma_remove(r_xprt);
 
-		ia->ri_id = NULL;
 		/* Return 1 to ensure the core destroys the id. */
 		return 1;
 	case RDMA_CM_EVENT_ESTABLISHED:
@@ -345,37 +344,6 @@  static void rpcrdma_update_cm_private(struct rpcrdma_xprt *r_xprt,
  * Exported functions.
  */
 
-/**
- * rpcrdma_ia_remove - Handle device driver unload
- * @ia: interface adapter being removed
- *
- * Divest transport H/W resources associated with this adapter,
- * but allow it to be restored later.
- *
- * Caller must hold the transport send lock.
- */
-void
-rpcrdma_ia_remove(struct rpcrdma_ia *ia)
-{
-	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
-						   rx_ia);
-
-	if (ia->ri_id->qp)
-		rpcrdma_xprt_drain(r_xprt);
-
-	rpcrdma_reps_unmap(r_xprt);
-	rpcrdma_reqs_reset(r_xprt);
-	rpcrdma_mrs_destroy(r_xprt);
-	rpcrdma_sendctxs_destroy(r_xprt);
-
-	rpcrdma_ep_destroy(r_xprt);
-
-	/* Allow waiters to continue */
-	complete(&ia->ri_remove_done);
-
-	trace_xprtrdma_remove(r_xprt);
-}
-
 static int rpcrdma_ep_create(struct rpcrdma_xprt *r_xprt)
 {
 	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
@@ -573,12 +541,13 @@  void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt)
 	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
 	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
 	struct rdma_cm_id *id = ia->ri_id;
-	int rc;
+	int rc, status = ep->rep_connected;
+
+	might_sleep();
 
 	if (!id)
-		goto out;
+		return;
 
-	/* returns without wait if ID is not connected */
 	rc = rdma_disconnect(id);
 	if (!rc)
 		wait_event_interruptible(ep->rep_connect_wait,
@@ -589,15 +558,17 @@  void rpcrdma_xprt_disconnect(struct rpcrdma_xprt *r_xprt)
 
 	if (id->qp)
 		rpcrdma_xprt_drain(r_xprt);
-out:
+	rpcrdma_reps_unmap(r_xprt);
 	rpcrdma_reqs_reset(r_xprt);
 	rpcrdma_mrs_destroy(r_xprt);
 	rpcrdma_sendctxs_destroy(r_xprt);
 
 	rpcrdma_ep_destroy(r_xprt);
 
-	if (ia->ri_id)
-		rdma_destroy_id(ia->ri_id);
+	if (status == -ENODEV)
+		complete(&ia->ri_remove_done);
+	else
+		rdma_destroy_id(id);
 	ia->ri_id = NULL;
 }
 
@@ -815,10 +786,10 @@  void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt)
 	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
 
-	/* If there is no underlying device, it's no use to
-	 * wake the refresh worker.
+	/* If there is no underlying connection, it's no use
+	 * to wake the refresh worker.
 	 */
-	if (ep->rep_connected != -ENODEV) {
+	if (ep->rep_connected == 1) {
 		/* The work is scheduled on a WQ_MEM_RECLAIM
 		 * workqueue in order to prevent MR allocation
 		 * from recursing into NFS during direct reclaim.
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 8be1b70b71a2..d2a0f125f7a8 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -75,15 +75,10 @@  struct rpcrdma_ia {
 	unsigned int		ri_max_frwr_depth;
 	bool			ri_implicit_roundup;
 	enum ib_mr_type		ri_mrtype;
-	unsigned long		ri_flags;
 	struct completion	ri_done;
 	struct completion	ri_remove_done;
 };
 
-enum {
-	RPCRDMA_IAF_REMOVING = 0,
-};
-
 /*
  * RDMA Endpoint -- one per transport instance
  */
@@ -455,11 +450,6 @@  struct rpcrdma_xprt {
 extern unsigned int xprt_rdma_memreg_strategy;
 
 /*
- * Interface Adapter calls - xprtrdma/verbs.c
- */
-void rpcrdma_ia_remove(struct rpcrdma_ia *ia);
-
-/*
  * Endpoint calls - xprtrdma/verbs.c
  */
 int rpcrdma_xprt_connect(struct rpcrdma_xprt *r_xprt);