[RFC,2/3] io_uring: add IOURING_REGISTER_RESTRICTIONS opcode

Message ID: 20200710141945.129329-3-sgarzare@redhat.com
State: New, archived
Series: io_uring: add restrictions to support untrusted applications and guests

Commit Message

Stefano Garzarella July 10, 2020, 2:19 p.m. UTC
The new io_uring_register(2) IOURING_REGISTER_RESTRICTIONS opcode
permanently installs a feature whitelist on an io_ring_ctx.
The io_ring_ctx can then be passed to untrusted code with the
knowledge that only operations present in the whitelist can be
executed.

The whitelist approach ensures that new features added to io_uring
do not accidentally become available when an existing application
is launched on a newer kernel version.

Currently it is possible to restrict sqe opcodes and register
opcodes. It is also possible to allow only fixed files.

The IOURING_REGISTER_RESTRICTIONS registration can only be made
once; afterwards the restrictions cannot be changed. This prevents
untrusted code from removing them.

Suggested-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
---
 fs/io_uring.c                 | 98 ++++++++++++++++++++++++++++++++++-
 include/uapi/linux/io_uring.h | 30 +++++++++++
 2 files changed, 127 insertions(+), 1 deletion(-)
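
For illustration, a minimal userspace sketch of installing an
allowlist with the new opcode (raw io_uring_register(2) via
syscall(2), no liburing; the entries chosen are arbitrary examples
and error handling is omitted):

	#include <linux/io_uring.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int install_restrictions(int ring_fd)
	{
		struct io_uring_restriction res[4];

		memset(res, 0, sizeof(res));

		/* Allow one io_uring_register(2) opcode... */
		res[0].opcode = IORING_RESTRICTION_REGISTER_OP;
		res[0].register_op = IORING_REGISTER_BUFFERS;

		/* ...allow two sqe opcodes... */
		res[1].opcode = IORING_RESTRICTION_SQE_OP;
		res[1].sqe_op = IORING_OP_READV;
		res[2].opcode = IORING_RESTRICTION_SQE_OP;
		res[2].sqe_op = IORING_OP_WRITEV;

		/* ...and allow only fixed (registered) files. */
		res[3].opcode = IORING_RESTRICTION_FIXED_FILES_ONLY;

		return syscall(__NR_io_uring_register, ring_fd,
			       IORING_REGISTER_RESTRICTIONS, res, 4);
	}

Any sqe opcode or register opcode outside the allowlist is then
rejected with -EACCES.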

Comments

Jens Axboe July 10, 2020, 5:52 p.m. UTC | #1
On 7/10/20 8:19 AM, Stefano Garzarella wrote:
> The new io_uring_register(2) IOURING_REGISTER_RESTRICTIONS opcode
> permanently installs a feature whitelist on an io_ring_ctx.
> The io_ring_ctx can then be passed to untrusted code with the
> knowledge that only operations present in the whitelist can be
> executed.
> 
> The whitelist approach ensures that new features added to io_uring
> do not accidentally become available when an existing application
> is launched on a newer kernel version.

Keeping with the trend of the times, you should probably use 'allowlist'
here instead of 'whitelist'.

> 
> Currently it is possible to restrict sqe opcodes and register
> opcodes. It is also possible to allow only fixed files.
> 
> The IOURING_REGISTER_RESTRICTIONS registration can only be made
> once; afterwards the restrictions cannot be changed. This prevents
> untrusted code from removing them.

A few comments below.

> @@ -337,6 +344,7 @@ struct io_ring_ctx {
>  	struct llist_head		file_put_llist;
>  
>  	struct work_struct		exit_work;
> +	struct io_restriction		restrictions;
>  };
>  
>  /*

Since very few will use this feature, I was going to suggest that we
make it dynamically allocated. But it's just 32 bytes, currently, so
probably not worth the effort...

> @@ -5491,6 +5499,11 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
>  	if (unlikely(!fixed && io_async_submit(req->ctx)))
>  		return -EBADF;
>  
> +	if (unlikely(!fixed && req->ctx->restrictions.enabled &&
> +		     test_bit(IORING_RESTRICTION_FIXED_FILES_ONLY,
> +			      req->ctx->restrictions.restriction_op)))
> +		return -EACCES;
> +
>  	return io_file_get(state, req, fd, &req->file, fixed);
>  }

This one hurts, though. I don't want any extra overhead from the
feature, and you're digging deep in ctx here to figure out if we need to
check.

Generally, all the checking needs to be out-of-line, and it needs to
base the decision on whether to check something or not on a cache hot
piece of data. So I'd suggest to turn all of these into some flag.
ctx->flags generally mirrors setup flags, so probably just add a:

	unsigned int restrictions : 1;

after eventfd_async : 1 in io_ring_ctx. That's free: there's plenty of
room there, and that cacheline is already pulled in for reading.
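
A sketch of the suggested shape (the bit is named 'restricted' here
only to avoid clashing with the existing 'restrictions' struct member;
illustrative, not a final layout):

	/* in struct io_ring_ctx, next to the other feature bits: */
	unsigned int		eventfd_async: 1;
	unsigned int		restricted: 1;

	/* the hot path then gates on the cache-hot bit first: */
	if (unlikely(req->ctx->restricted)) {
		if (!fixed && test_bit(IORING_RESTRICTION_FIXED_FILES_ONLY,
				       req->ctx->restrictions.restriction_op))
			return -EACCES;
	}
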
Stefano Garzarella July 13, 2020, 8:07 a.m. UTC | #2
On Fri, Jul 10, 2020 at 11:52:48AM -0600, Jens Axboe wrote:
> On 7/10/20 8:19 AM, Stefano Garzarella wrote:
> > The new io_uring_register(2) IOURING_REGISTER_RESTRICTIONS opcode
> > permanently installs a feature whitelist on an io_ring_ctx.
> > The io_ring_ctx can then be passed to untrusted code with the
> > knowledge that only operations present in the whitelist can be
> > executed.
> > 
> > The whitelist approach ensures that new features added to io_uring
> > do not accidentally become available when an existing application
> > is launched on a newer kernel version.
> 
> Keeping with the trend of the times, you should probably use 'allowlist'
> here instead of 'whitelist'.

Sure, it is better!

> > 
> > Currently it is possible to restrict sqe opcodes and register
> > opcodes. It is also possible to allow only fixed files.
> > 
> > The IOURING_REGISTER_RESTRICTIONS registration can only be made
> > once; afterwards the restrictions cannot be changed. This prevents
> > untrusted code from removing them.
> 
> A few comments below.
> 
> > @@ -337,6 +344,7 @@ struct io_ring_ctx {
> >  	struct llist_head		file_put_llist;
> >  
> >  	struct work_struct		exit_work;
> > +	struct io_restriction		restrictions;
> >  };
> >  
> >  /*
> 
> Since very few will use this feature, I was going to suggest that we
> make it dynamically allocated. But it's just 32 bytes, currently, so
> probably not worth the effort...
> 

Yeah, I'm not sure it will grow in the future, so I'm tempted to leave it
as it is, but I can easily change it if you prefer.

> > @@ -5491,6 +5499,11 @@ static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
> >  	if (unlikely(!fixed && io_async_submit(req->ctx)))
> >  		return -EBADF;
> >  
> > +	if (unlikely(!fixed && req->ctx->restrictions.enabled &&
> > +		     test_bit(IORING_RESTRICTION_FIXED_FILES_ONLY,
> > +			      req->ctx->restrictions.restriction_op)))
> > +		return -EACCES;
> > +
> >  	return io_file_get(state, req, fd, &req->file, fixed);
> >  }
> 
> This one hurts, though. I don't want any extra overhead from the
> feature, and you're digging deep in ctx here to figure out if we need to
> check.
> 
> Generally, all the checking needs to be out-of-line, and it needs to
> base the decision on whether to check something or not on a cache hot
> piece of data. So I'd suggest to turn all of these into some flag.
> ctx->flags generally mirrors setup flags, so probably just add a:
> 
> 	unsigned int restrictions : 1;
> 
> after eventfd_async : 1 in io_ring_ctx. That's free: there's plenty of
> room there, and that cacheline is already pulled in for reading.
> 

Thanks for the clear explanation!

I left a TODO comment near the 'enabled' field to look for something better,
and what you're suggesting is what I was looking for :-)

I'll change it!

Thanks,
Stefano

Patch

diff --git a/fs/io_uring.c b/fs/io_uring.c
index d37d7ea5ebe5..4768a9973d4b 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -218,6 +218,13 @@  struct io_buffer {
 	__u16 bid;
 };
 
+struct io_restriction {
+	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
+	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
+	DECLARE_BITMAP(restriction_op, IORING_RESTRICTION_LAST);
+	bool enabled; /* TODO: remove and use a flag ?? */
+};
+
 struct io_ring_ctx {
 	struct {
 		struct percpu_ref	refs;
@@ -337,6 +344,7 @@  struct io_ring_ctx {
 	struct llist_head		file_put_llist;
 
 	struct work_struct		exit_work;
+	struct io_restriction		restrictions;
 };
 
 /*
@@ -5491,6 +5499,11 @@  static int io_req_set_file(struct io_submit_state *state, struct io_kiocb *req,
 	if (unlikely(!fixed && io_async_submit(req->ctx)))
 		return -EBADF;
 
+	if (unlikely(!fixed && req->ctx->restrictions.enabled &&
+		     test_bit(IORING_RESTRICTION_FIXED_FILES_ONLY,
+			      req->ctx->restrictions.restriction_op)))
+		return -EACCES;
+
 	return io_file_get(state, req, fd, &req->file, fixed);
 }
 
@@ -5895,6 +5908,10 @@  static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	if (unlikely(req->opcode >= IORING_OP_LAST))
 		return -EINVAL;
 
+	if (unlikely(ctx->restrictions.enabled &&
+		     !test_bit(req->opcode, ctx->restrictions.sqe_op)))
+		return -EACCES;
+
 	if (unlikely(io_sq_thread_acquire_mm(ctx, req)))
 		return -EFAULT;
 
@@ -8079,6 +8096,69 @@  static int io_unregister_personality(struct io_ring_ctx *ctx, unsigned id)
 	return -EINVAL;
 }
 
+static int io_register_restrictions(struct io_ring_ctx *ctx, void __user *arg,
+				    unsigned int nr_args)
+{
+	struct io_uring_restriction *res;
+	size_t size;
+	int i, ret;
+
+	/* We allow only a single restrictions registration */
+	if (ctx->restrictions.enabled)
+		return -EINVAL; /* TODO: check ret value */
+
+	/* TODO: Is it okay to set a maximum? */
+	if (!arg || nr_args > 256)
+		return -EINVAL;
+
+	size = array_size(nr_args, sizeof(*res));
+	if (size == SIZE_MAX)
+		return -EOVERFLOW;
+
+	res = kmalloc(size, GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+
+	if (copy_from_user(res, arg, size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	for (i = 0; i < nr_args; i++) {
+		if (res[i].opcode >= IORING_RESTRICTION_LAST) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		__set_bit(res[i].opcode, ctx->restrictions.restriction_op);
+
+		if (res[i].opcode == IORING_RESTRICTION_REGISTER_OP) {
+			if (res[i].register_op >= IORING_REGISTER_LAST) {
+				ret = -EINVAL;
+				goto out;
+			}
+
+			__set_bit(res[i].register_op,
+				  ctx->restrictions.register_op);
+		} else if (res[i].opcode == IORING_RESTRICTION_SQE_OP) {
+			if (res[i].sqe_op >= IORING_OP_LAST) {
+				ret = -EINVAL;
+				goto out;
+			}
+
+			__set_bit(res[i].sqe_op, ctx->restrictions.sqe_op);
+		}
+	}
+
+	ctx->restrictions.enabled = true;
+
+	ret = 0;
+out:
+	/* TODO: should we reset all restrictions if an error happened? */
+	kfree(res);
+	return ret;
+}
+
 static bool io_register_op_must_quiesce(int op)
 {
 	switch (op) {
@@ -8125,6 +8205,18 @@  static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 		if (ret) {
 			percpu_ref_resurrect(&ctx->refs);
 			ret = -EINTR;
+			goto out_quiesce;
+		}
+	}
+
+	if (ctx->restrictions.enabled) {
+		if (opcode >= IORING_REGISTER_LAST) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		if (!test_bit(opcode, ctx->restrictions.register_op)) {
+			ret = -EACCES;
 			goto out;
 		}
 	}
@@ -8188,15 +8280,19 @@  static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
 			break;
 		ret = io_unregister_personality(ctx, nr_args);
 		break;
+	case IORING_REGISTER_RESTRICTIONS:
+		ret = io_register_restrictions(ctx, arg, nr_args);
+		break;
 	default:
 		ret = -EINVAL;
 		break;
 	}
 
+out:
 	if (io_register_op_must_quiesce(opcode)) {
 		/* bring the ctx back to life */
 		percpu_ref_reinit(&ctx->refs);
-out:
+out_quiesce:
 		reinit_completion(&ctx->ref_comp);
 	}
 	return ret;
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 2d18f1d0b5df..69f4684c988d 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -264,6 +264,7 @@  enum {
 	IORING_REGISTER_PROBE,
 	IORING_REGISTER_PERSONALITY,
 	IORING_UNREGISTER_PERSONALITY,
+	IORING_REGISTER_RESTRICTIONS,
 
 	/* this goes last */
 	IORING_REGISTER_LAST
@@ -292,4 +293,33 @@  struct io_uring_probe {
 	struct io_uring_probe_op ops[0];
 };
 
+struct io_uring_restriction {
+	__u16 opcode;
+	union {
+		__u8 register_op; /* IORING_RESTRICTION_REGISTER_OP */
+		__u8 sqe_op;      /* IORING_RESTRICTION_SQE_OP */
+	};
+	__u8 resv;
+	__u32 resv2[3];
+};
+
+/*
+ * io_uring_restriction->opcode values
+ */
+enum {
+	/* Allow an io_uring_register(2) opcode */
+	IORING_RESTRICTION_REGISTER_OP,
+
+	/* Allow an sqe opcode */
+	IORING_RESTRICTION_SQE_OP,
+
+	/* Only allow fixed files */
+	IORING_RESTRICTION_FIXED_FILES_ONLY,
+
+	/* Only allow registered addresses and translate them */
+	//TODO: IORING_RESTRICTION_BUFFER_CHECK,
+
+	IORING_RESTRICTION_LAST
+};
+
 #endif
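
Following the registration sketch above, enforcement can be observed
from userspace; the errno values follow from the checks in this patch
(illustrative only, same headers as the earlier sketch plus errno.h
and stdio.h):

	static void check_enforcement(int ring_fd)
	{
		int ret;

		/* IORING_REGISTER_FILES was not allowlisted above, so
		 * __io_uring_register() refuses it before reaching the
		 * opcode switch. */
		ret = syscall(__NR_io_uring_register, ring_fd,
			      IORING_REGISTER_FILES, NULL, 0);
		if (ret < 0 && errno == EACCES)
			printf("register opcode blocked\n");

		/* A second IOURING_REGISTER_RESTRICTIONS call is also
		 * refused: the opcode was not allowlisted (-EACCES),
		 * and even if it were, io_register_restrictions()
		 * returns -EINVAL once restrictions are enabled. */
		ret = syscall(__NR_io_uring_register, ring_fd,
			      IORING_REGISTER_RESTRICTIONS, NULL, 0);
		if (ret < 0)
			printf("restrictions are permanent\n");
	}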