diff mbox

[v2,1/6] aio: take list removal to (some) callers of aio_complete()

Message ID 20180528175707.10926-1-viro@ZenIV.linux.org.uk (mailing list archive)
State New, archived
Headers show

Commit Message

Al Viro May 28, 2018, 5:57 p.m. UTC
From: Al Viro <viro@zeniv.linux.org.uk>

We really want the iocb off the ctx->ctx_lock-protected list (and thus
out of io_cancel(2) reach) before we start tearing it down, so move the
list removal from aio_complete() out to its callers.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
---
 fs/aio.c | 41 ++++++++++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 17 deletions(-)
diff mbox

Patch

diff --git a/fs/aio.c b/fs/aio.c
index e0b2f183fa1c..f95b167801c2 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1073,14 +1073,6 @@  static void aio_complete(struct aio_kiocb *iocb, long res, long res2)
 	unsigned tail, pos, head;
 	unsigned long	flags;
 
-	if (!list_empty_careful(&iocb->ki_list)) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&ctx->ctx_lock, flags);
-		list_del(&iocb->ki_list);
-		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
-	}
-
 	/*
 	 * Add a completion event to the ring buffer. Must be done holding
 	 * ctx->completion_lock to prevent other code from messing with the tail
@@ -1402,6 +1394,15 @@  static void aio_complete_rw(struct kiocb *kiocb, long res, long res2)
 {
 	struct aio_kiocb *iocb = container_of(kiocb, struct aio_kiocb, rw);
 
+	if (!list_empty_careful(&iocb->ki_list)) {
+		struct kioctx	*ctx = iocb->ki_ctx;
+		unsigned long flags;
+
+		spin_lock_irqsave(&ctx->ctx_lock, flags);
+		list_del(&iocb->ki_list);
+		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+	}
+
 	if (kiocb->ki_flags & IOCB_WRITE) {
 		struct inode *inode = file_inode(kiocb->ki_filp);
 
@@ -1594,20 +1595,26 @@  static inline bool __aio_poll_remove(struct poll_iocb *req)
 	return true;
 }
 
-static inline void __aio_poll_complete(struct poll_iocb *req, __poll_t mask)
+static inline void __aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
 {
-	struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
-	struct file *file = req->file;
-
+	fput(iocb->poll.file);
 	aio_complete(iocb, mangle_poll(mask), 0);
-	fput(file);
 }
 
 static void aio_poll_work(struct work_struct *work)
 {
-	struct poll_iocb *req = container_of(work, struct poll_iocb, work);
+	struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, poll.work);
+
+	if (!list_empty_careful(&iocb->ki_list)) {
+		struct kioctx	*ctx = iocb->ki_ctx;
+		unsigned long flags;
+
+		spin_lock_irqsave(&ctx->ctx_lock, flags);
+		list_del(&iocb->ki_list);
+		spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+	}
 
-	__aio_poll_complete(req, req->events);
+	__aio_poll_complete(iocb, iocb->poll.events);
 }
 
 static int aio_poll_cancel(struct kiocb *iocb)
@@ -1658,7 +1665,7 @@  static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
 		list_del_init(&iocb->ki_list);
 		spin_unlock(&iocb->ki_ctx->ctx_lock);
 
-		__aio_poll_complete(req, mask);
+		__aio_poll_complete(iocb, mask);
 	} else {
 		req->events = mask;
 		INIT_WORK(&req->work, aio_poll_work);
@@ -1710,7 +1717,7 @@  static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
 	spin_unlock_irq(&ctx->ctx_lock);
 done:
 	if (mask)
-		__aio_poll_complete(req, mask);
+		__aio_poll_complete(aiocb, mask);
 	return -EIOCBQUEUED;
 out_fail:
 	fput(req->file);