[v4,05/30] xprtrdma: No qp_event disconnect

Message ID 20181217163948.24133.60342.stgit@manet.1015granger.net
State New, archived
Series NFS/RDMA client for next

Commit Message

Chuck Lever Dec. 17, 2018, 4:39 p.m. UTC
After thinking about this more, and auditing other kernel ULP
implementations, I believe that a DISCONNECT cm_event will occur after
a fatal QP event. If that's the case, there's no need for an explicit
disconnect in the QP event handler.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
---
 net/sunrpc/xprtrdma/verbs.c     |   32 --------------------------------
 net/sunrpc/xprtrdma/xprt_rdma.h |    1 -
 2 files changed, 33 deletions(-)
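
The rationale above relies on the connection manager delivering a
DISCONNECTED cm_event after a fatal QP error, at which point the
transport's existing CM upcall (not touched by this patch) already
forces the disconnect and wakes the connect waiters. A minimal sketch
of that path follows; the handler name matches rpcrdma_cm_event_handler()
in verbs.c, but the exact errno, context pointer, and surrounding cases
are assumptions here, not taken from this patch:

/*
 * Sketch only, not part of this patch: the CM upcall path the patch
 * description relies on.  Field and function names follow the existing
 * xprtrdma code; details are assumed for illustration.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *r_xprt = id->context;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;

	switch (event->event) {
	case RDMA_CM_EVENT_DISCONNECTED:
		/* Expected to follow a fatal QP event, so the QP event
		 * handler no longer needs to force a disconnect itself.
		 */
		ep->rep_connected = -ECONNABORTED;
		xprt_force_disconnect(&r_xprt->rx_xprt);
		wake_up_all(&ep->rep_connect_wait);
		break;
	default:
		/* Other CM events elided from this sketch. */
		break;
	}
	return 0;
}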

Patch

diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index cff3a5d..9a0a765 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -100,25 +100,6 @@  static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
 }
 
 /**
- * rpcrdma_disconnect_worker - Force a disconnect
- * @work: endpoint to be disconnected
- *
- * Provider callbacks can possibly run in an IRQ context. This function
- * is invoked in a worker thread to guarantee that disconnect wake-up
- * calls are always done in process context.
- */
-static void
-rpcrdma_disconnect_worker(struct work_struct *work)
-{
-	struct rpcrdma_ep *ep = container_of(work, struct rpcrdma_ep,
-					     rep_disconnect_worker.work);
-	struct rpcrdma_xprt *r_xprt =
-		container_of(ep, struct rpcrdma_xprt, rx_ep);
-
-	xprt_force_disconnect(&r_xprt->rx_xprt);
-}
-
-/**
  * rpcrdma_qp_event_handler - Handle one QP event (error notification)
  * @event: details of the event
  * @context: ep that owns QP where event occurred
@@ -134,15 +115,6 @@  static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
 						   rx_ep);
 
 	trace_xprtrdma_qp_event(r_xprt, event);
-	pr_err("rpcrdma: %s on device %s connected to %s:%s\n",
-	       ib_event_msg(event->event), event->device->name,
-	       rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
-
-	if (ep->rep_connected == 1) {
-		ep->rep_connected = -EIO;
-		schedule_delayed_work(&ep->rep_disconnect_worker, 0);
-		wake_up_all(&ep->rep_connect_wait);
-	}
 }
 
 /**
@@ -571,8 +543,6 @@  static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
 				   cdata->max_requests >> 2);
 	ep->rep_send_count = ep->rep_send_batch;
 	init_waitqueue_head(&ep->rep_connect_wait);
-	INIT_DELAYED_WORK(&ep->rep_disconnect_worker,
-			  rpcrdma_disconnect_worker);
 	ep->rep_receive_count = 0;
 
 	sendcq = ib_alloc_cq(ia->ri_device, NULL,
@@ -646,8 +616,6 @@  static void rpcrdma_xprt_drain(struct rpcrdma_xprt *r_xprt)
 void
 rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
 {
-	cancel_delayed_work_sync(&ep->rep_disconnect_worker);
-
 	if (ia->ri_id && ia->ri_id->qp) {
 		rpcrdma_ep_disconnect(ep, ia);
 		rdma_destroy_qp(ia->ri_id);
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index 3f198cd..7c1b519 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -101,7 +101,6 @@  struct rpcrdma_ep {
 	wait_queue_head_t 	rep_connect_wait;
 	struct rpcrdma_connect_private	rep_cm_private;
 	struct rdma_conn_param	rep_remote_cma;
-	struct delayed_work	rep_disconnect_worker;
 	int			rep_receive_count;
 };
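
For reference, with the removals above applied the QP event handler is
reduced to its trace point. The body below is reconstructed from the
hunk context in verbs.c; the signature and local-variable setup are
inferred from the kerneldoc and context lines rather than shown
verbatim in this patch:

/* Reconstructed post-patch handler: record the event and rely on the
 * subsequent DISCONNECTED cm_event to tear down the connection.
 */
static void
rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);

	trace_xprtrdma_qp_event(r_xprt, event);
}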