
[1/6] fuse: Kill fasync only if interrupt is queued in queue_interrupt()

Message ID 154149663324.17764.5859568987168525822.stgit@localhost.localdomain
State New, archived
Series: fuse: Interrupt-related optimizations and improvements

Commit Message

Kirill Tkhai Nov. 6, 2018, 9:30 a.m. UTC
We should send the signal only if the interrupt is really queued.
Not a real problem, but this makes the code clearer and more intuitive.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
---
 fs/fuse/dev.c |    6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

Comments

Miklos Szeredi Nov. 7, 2018, 12:45 p.m. UTC | #1
On Tue, Nov 6, 2018 at 10:30 AM, Kirill Tkhai <ktkhai@virtuozzo.com> wrote:
> We should send the signal only if the interrupt is really queued.
> Not a real problem, but this makes the code clearer and more intuitive.
>
> Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
> ---
>  fs/fuse/dev.c |    6 +++++-
>  1 file changed, 5 insertions(+), 1 deletion(-)
>
> diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
> index fb2530ed84b3..7705f75c77a3 100644
> --- a/fs/fuse/dev.c
> +++ b/fs/fuse/dev.c
> @@ -468,6 +468,8 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
>
>  static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
>  {
> +       bool kill = false;
> +
>         spin_lock(&fiq->waitq.lock);
>         if (test_bit(FR_FINISHED, &req->flags)) {
>                 spin_unlock(&fiq->waitq.lock);
> @@ -476,9 +478,11 @@ static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
>         if (list_empty(&req->intr_entry)) {
>                 list_add_tail(&req->intr_entry, &fiq->interrupts);
>                 wake_up_locked(&fiq->waitq);
> +               kill = true;
>         }
>         spin_unlock(&fiq->waitq.lock);
> -       kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
> +       if (kill)
> +               kill_fasync(&fiq->fasync, SIGIO, POLL_IN);

All the other cases just do the kill_fasync() inside the region locked by
fiq->waitq.lock.  That seems like the simpler and more readable solution
here.

Thanks,
Miklos
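
For reference, Miklos's suggestion would make queue_interrupt() look roughly
like this (a sketch derived from the quoted code, not a patch that was
actually posted):

static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
	spin_lock(&fiq->waitq.lock);
	if (test_bit(FR_FINISHED, &req->flags)) {
		spin_unlock(&fiq->waitq.lock);
		return;
	}
	if (list_empty(&req->intr_entry)) {
		list_add_tail(&req->intr_entry, &fiq->interrupts);
		wake_up_locked(&fiq->waitq);
		/* Notify the daemon only when the interrupt was actually
		 * queued, while still holding fiq->waitq.lock, matching
		 * the other kill_fasync() call sites. */
		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
	}
	spin_unlock(&fiq->waitq.lock);
}

This variant avoids the extra "kill" flag entirely.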

Patch

diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index fb2530ed84b3..7705f75c77a3 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -468,6 +468,8 @@  static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 
 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
+	bool kill = false;
+
 	spin_lock(&fiq->waitq.lock);
 	if (test_bit(FR_FINISHED, &req->flags)) {
 		spin_unlock(&fiq->waitq.lock);
@@ -476,9 +478,11 @@  static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 	if (list_empty(&req->intr_entry)) {
 		list_add_tail(&req->intr_entry, &fiq->interrupts);
 		wake_up_locked(&fiq->waitq);
+		kill = true;
 	}
 	spin_unlock(&fiq->waitq.lock);
-	kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
+	if (kill)
+		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 }
 
 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)