io_uring: fix the bug that a child process can't do io tasks

Message ID 20200915130245.89585-1-zhuyinyin@bytedance.com (mailing list archive)
State New, archived
Series io_uring: fix the bug that a child process can't do io tasks

Commit Message

Yinyin Zhu Sept. 15, 2020, 1:02 p.m. UTC
When the parent process sets up an io_uring instance, ctx->sqo_mm is
assigned the parent process's mm. The parent then forks a child
process, so the child inherits the io_uring instance fd from the
parent. When the child submits an io task to that io_uring instance,
the kworker that actually performs the task uses ctx->sqo_mm as its
mm, but that is the parent's mm, not the child's, so the child's io
task fails. To fix this bug, when a process submits an io task to the
kworker, assign ctx->sqo_mm the submitting process's mm.

Signed-off-by: Yinyin Zhu <zhuyinyin@bytedance.com>
---
 fs/io_uring.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
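
To see why the parent's mm cannot be used for the child's buffers: after
fork() the two processes have separate address spaces, so a user address
submitted by the child must be resolved through the child's page tables.
A tiny user-space illustration of this (added for clarity only, not part
of the patch):

/*
 * After fork(), a write in the child is not visible to the parent even
 * though both processes see the same virtual address.
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
    int value = 1;

    if (fork() == 0) {
        value = 2;      /* modifies the child's private copy only */
        printf("child:  value=%d at %p\n", value, (void *)&value);
        _exit(0);
    }
    wait(NULL);
    printf("parent: value=%d at %p\n", value, (void *)&value);
    return 0;
}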

Comments

Yinyin Zhu Sept. 16, 2020, 3:53 a.m. UTC | #1
Dear Jens Axboe:

  Thanks for your reply. Yes, my approach does have the problem you
described, and yours is clearly better than mine. But I think your
approach is still not complete. Consider the same case I mentioned
before: the parent process sets up an io_uring instance with the
IORING_SETUP_SQPOLL flag, which means the sqo_thread is created and
ctx->sqo_mm is assigned the parent process's mm. Then the parent
process forks a child process, and of course the child inherits the fd
of the io_uring instance.

  The child process then submits an io task without ever context
switching into the kernel. So the sqo_thread cannot even tell whether
the parent process or the child submitted the io task; it just uses
ctx->sqo_mm as its mm. But ctx->sqo_mm is the parent process's mm, not
the child's, so the problem occurs again.
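
For illustration, a minimal sketch of that SQPOLL setup (assuming
liburing; this is not the reproducer attached to this mail, and details
such as the privileges and registered-file requirements for SQPOLL are
glossed over):

#include <string.h>
#include <unistd.h>
#include <sys/uio.h>
#include "liburing.h"

int main(void)
{
    struct io_uring_params p;
    struct io_uring ring;
    struct io_uring_sqe *sqe;
    char buf[64];
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

    memset(&p, 0, sizeof(p));
    p.flags = IORING_SETUP_SQPOLL;  /* sqo_thread is created here, and   */
    p.sq_thread_idle = 2000;        /* ctx->sqo_mm = the parent's mm     */
    if (io_uring_queue_init_params(8, &ring, &p) < 0)
        return 1;

    if (fork() == 0) {
        /* Child: inherits the ring fd, but has its own mm after fork(). */
        sqe = io_uring_get_sqe(&ring);
        if (!sqe)
            _exit(1);
        io_uring_prep_readv(sqe, 0 /* stdin */, &iov, 1, 0);
        /*
         * With SQPOLL, io_uring_submit() may only advance the SQ ring
         * tail; the sqo_thread picks the request up without the child
         * ever entering the kernel, and resolves 'buf' through
         * ctx->sqo_mm, i.e. the parent's address space.
         */
        io_uring_submit(&ring);
        _exit(0);
    }
    sleep(1);  /* parent keeps the ring alive while the child submits */
    return 0;
}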

Two more things:
  1. I think 5.9 also has this problem -- "the sqo_thread doesn't know
who submitted the io task, so it will use the wrong mm".

  2. The attachment of this mail is the test application (also shown
below). If the child process exits quickly, the problem occurs.


On Tue, Sep 15, 2020 at 9:36 PM Jens Axboe <axboe@kernel.dk> wrote:
>
> On 9/15/20 7:25 AM, Jens Axboe wrote:
> > On 9/15/20 7:02 AM, Yinyin Zhu wrote:
> >> When the parent process sets up an io_uring instance, ctx->sqo_mm is
> >> assigned the parent process's mm. The parent then forks a child
> >> process, so the child inherits the io_uring instance fd from the
> >> parent. When the child submits an io task to that io_uring instance,
> >> the kworker that actually performs the task uses ctx->sqo_mm as its
> >> mm, but that is the parent's mm, not the child's, so the child's io
> >> task fails. To fix this bug, when a process submits an io task to the
> >> kworker, assign ctx->sqo_mm the submitting process's mm.
> >
> > Hmm, what's the test case for this? There's a 5.9 regression where we
> > don't always grab the right context for certain linked cases, below
> > is the fix. Does that fix your case?
>
> Ah hang on, you're on the 5.4 code base... I think this is a better
> approach. Any chance you can test it?
>
> The problem with yours is that you can have multiple pending async
> ones, and you can't just re-assign ctx->sqo_mm. That one should only
> be used by the SQPOLL thread.
>
>
> diff --git a/fs/io_uring.c b/fs/io_uring.c
> index 2a539b794f3b..e8a4b4ae7006 100644
> --- a/fs/io_uring.c
> +++ b/fs/io_uring.c
> @@ -514,7 +514,7 @@ static inline void io_queue_async_work(struct io_ring_ctx *ctx,
>                 }
>         }
>
> -       req->task = current;
> +       req->task = get_task_struct(current);
>
>         spin_lock_irqsave(&ctx->task_lock, flags);
>         list_add(&req->task_list, &ctx->task_list);
> @@ -1832,6 +1832,7 @@ static void io_poll_complete_work(struct work_struct *work)
>         spin_unlock_irq(&ctx->completion_lock);
>
>         io_cqring_ev_posted(ctx);
> +       put_task_struct(req->task);
>         io_put_req(req);
>  out:
>         revert_creds(old_cred);
> @@ -2234,11 +2235,11 @@ static void io_sq_wq_submit_work(struct work_struct *work)
>
>                 ret = 0;
>                 if (io_req_needs_user(req) && !cur_mm) {
> -                       if (!mmget_not_zero(ctx->sqo_mm)) {
> +                       if (!mmget_not_zero(req->task->mm)) {
>                                 ret = -EFAULT;
>                                 goto end_req;
>                         } else {
> -                               cur_mm = ctx->sqo_mm;
> +                               cur_mm = req->task->mm;
>                                 use_mm(cur_mm);
>                                 old_fs = get_fs();
>                                 set_fs(USER_DS);
> @@ -2275,6 +2276,7 @@ static void io_sq_wq_submit_work(struct work_struct *work)
>                 }
>
>                 /* drop submission reference */
> +               put_task_struct(req->task);
>                 io_put_req(req);
>
>                 if (ret) {
>
> --
> Jens Axboe
>
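
A note on the diff quoted above: the submitting task is recorded per
request with get_task_struct() and released with put_task_struct() once
the work item has run, so the async worker uses the submitter's mm
(req->task->mm) rather than the shared ctx->sqo_mm, and the task_struct
stays valid while requests that reference it are still pending.

The test application attached to the mail: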
/* SPDX-License-Identifier: MIT */
/*
 * Test case: the parent sets up an io_uring instance, forks, and the
 * child submits a recvmsg through the inherited ring while the parent
 * stays alive.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <pthread.h>
#include "liburing.h"
static const char *str = "This is a test of sendmsg and recvmsg over io_uring!";
#define MAX_MSG 128
#define PORT    10200
#define HOST    "127.0.0.1"


static int recv_prep(struct io_uring *ring, struct iovec *iov)
{
    struct sockaddr_in saddr;
    struct msghdr msg;
    struct io_uring_sqe *sqe = NULL;
    int sockfd, ret;
    int val = 1;

    /* Bind a UDP socket on PORT so there is something to receive on. */
    memset(&saddr, 0, sizeof(saddr));
    saddr.sin_family = AF_INET;
    saddr.sin_addr.s_addr = htonl(INADDR_ANY);
    saddr.sin_port = htons(PORT);

    sockfd = socket(AF_INET, SOCK_DGRAM, 0);
    if (sockfd < 0) {
        perror("socket");
        return 1;
    }

    setsockopt(sockfd, SOL_SOCKET, SO_REUSEPORT, &val, sizeof(val));
    setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &val, sizeof(val));

    ret = bind(sockfd, (struct sockaddr *)&saddr, sizeof(saddr));
    if (ret < 0) {
        perror("bind");
        goto err;
    }

    /* Queue a recvmsg into the caller's buffer and submit it. */
    memset(&msg, 0, sizeof(msg));
    msg.msg_namelen = sizeof(struct sockaddr_in);
    msg.msg_iov = iov;
    msg.msg_iovlen = 1;

    sqe = io_uring_get_sqe(ring);
    if (!sqe) {
        printf("sqe is null\n");
        goto err;
    }
    io_uring_prep_recvmsg(sqe, sockfd, &msg, 0);

    ret = io_uring_submit_and_wait(ring, 1);
    if (ret <= 0) {
        printf("submit failed: %d\n", ret);
        goto err;
    }
    printf("submitted %d sqe(s)\n", ret);

    close(sockfd);
    return 0;
err:
    close(sockfd);
    return 1;
}

int main(int argc, char *argv[])
{
    pid_t p;
    char buf[MAX_MSG + 1];
    struct iovec iov = {
        .iov_base = buf,
        .iov_len = sizeof(buf) - 1,
    };
    struct io_uring ring;
    int ret;

    /* The parent sets up the ring, so ctx->sqo_mm is the parent's mm. */
    ret = io_uring_queue_init(1, &ring, 0);
    if (ret) {
        printf("queue init failed: %d\n", ret);
        return 0;
    }

    p = fork();
    switch (p) {
    case -1:
        perror("fork");
        exit(2);
    case 0:
        /* Child: submit recvmsg on the inherited ring, then exit quickly. */
        recv_prep(&ring, &iov);
        //sleep(500000);
        break;
    default:
        /* Parent: keep the ring (and its mm) alive while the child runs. */
        printf("pid: %d\n", p);
        //recv_prep(&ring, &iov);
        sleep(50000);
        return 0;
    }
    return 0;
}
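
To build and run the reproducer (assuming liburing is installed; the
source file name used here is arbitrary):

    gcc -o io_uring_fork_test io_uring_fork_test.c -luring
    ./io_uring_fork_test

Per the description above, when the bug is present the recvmsg queued by
the child is executed by the worker under the parent's mm, so the
child's submission does not succeed; the parent's sleep only keeps the
ring alive while the child runs.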

Patch

diff --git a/fs/io_uring.c b/fs/io_uring.c
index f115fff39cf4..f5d6bd54a625 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -514,7 +514,7 @@  static inline void io_queue_async_work(struct io_ring_ctx *ctx,
 	}
 
 	req->files = current->files;
-
+	ctx->sqo_mm = current->mm;
 	spin_lock_irqsave(&ctx->task_lock, flags);
 	list_add(&req->task_list, &ctx->task_list);
 	req->work_task = NULL;
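
For context on Jens's remark above that ctx->sqo_mm cannot simply be
re-assigned: a sketch of the interleaving this hunk would allow when
more than one process queues async work (ordering is illustrative):

/*
 *  process A                 process B                 async worker
 *  ---------                 ---------                 ------------
 *  io_queue_async_work()
 *    ctx->sqo_mm = A's mm
 *                            io_queue_async_work()
 *                              ctx->sqo_mm = B's mm
 *                                                      runs A's request,
 *                                                      but picks up
 *                                                      ctx->sqo_mm == B's mm
 *
 * Recording the submitter per request (req->task, as in the diff quoted
 * in the comments) avoids this last-writer-wins update of a shared field.
 */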