@@ -538,6 +538,9 @@ enum {
/* set/get number of fixed workers */
IORING_REGISTER_IOWQ_FIXED_WORKERS = 26,
+ /* destroy fixed workers */
+ IORING_UNREGISTER_IOWQ_FIXED_WORKERS = 27,
+
/* this goes last */
IORING_REGISTER_LAST,
@@ -1386,7 +1386,7 @@ static void io_wq_clean_fixed_workers(struct io_wq *wq)
if (!workers[j])
continue;
workers[j]->flags |= IO_WORKER_F_EXIT;
- wake_up_process(worker->task);
+ wake_up_process(workers[j]->task);
}
kfree(workers);
}
@@ -1456,6 +1456,54 @@ int io_wq_fixed_workers(struct io_wq *wq, struct io_uring_fixed_worker_arg *coun
return ret;
}
+/*
+ * destroy fixed workers.
+ */
+/*
+ * Destroy all fixed workers previously set up on @wq.
+ *
+ * Each fixed worker is flagged with IO_WORKER_F_EXIT and woken so it can
+ * tear itself down; the per-acct bookkeeping (fixed_workers array and
+ * fixed_nr count) is cleared before the array is freed so no later path
+ * (e.g. io_wq_clean_fixed_workers() at wq teardown) can observe a
+ * dangling pointer or free the array a second time.
+ *
+ * Returns 0 if at least one acct had fixed workers, -EFAULT otherwise.
+ */
+int io_wq_destroy_fixed_workers(struct io_wq *wq)
+{
+	bool found = false;
+	int i, j;
+
+	BUILD_BUG_ON((int) IO_WQ_ACCT_BOUND != (int) IO_WQ_BOUND);
+	BUILD_BUG_ON((int) IO_WQ_ACCT_UNBOUND != (int) IO_WQ_UNBOUND);
+	BUILD_BUG_ON((int) IO_WQ_ACCT_NR != 2);
+
+	/*
+	 * Do the presence check and the teardown in a single locked
+	 * section: checking fixed_nr under the lock, dropping it and
+	 * re-taking it for the destroy pass would let the counts change
+	 * in between.
+	 */
+	rcu_read_lock();
+	raw_spin_lock(&wq->lock);
+	for (i = 0; i < IO_WQ_ACCT_NR; i++) {
+		struct io_wq_acct *acct = &wq->acct[i];
+		struct io_worker **workers = acct->fixed_workers;
+		unsigned int nr = acct->fixed_nr;
+
+		if (!nr)
+			continue;
+		found = true;
+
+		for (j = 0; j < nr; j++) {
+			struct io_worker *worker = workers[j];
+
+			BUG_ON(!worker);
+			BUG_ON(!worker->task);
+
+			worker->flags |= IO_WORKER_F_EXIT;
+			wake_up_process(worker->task);
+		}
+		/*
+		 * Clear the bookkeeping before freeing so nobody can see a
+		 * stale acct->fixed_workers pointer.
+		 * TODO: wait for the woken workers to actually exit before
+		 * a new registration can reuse these slots — confirm the
+		 * worker exit path does not touch the freed array.
+		 */
+		acct->fixed_workers = NULL;
+		acct->fixed_nr = 0;
+		kfree(workers);
+	}
+	raw_spin_unlock(&wq->lock);
+	rcu_read_unlock();
+
+	return found ? 0 : -EFAULT;
+}
+
static __init int io_wq_init(void)
{
int ret;
@@ -53,6 +53,7 @@ void io_wq_hash_work(struct io_wq_work *work, void *val);
int io_wq_cpu_affinity(struct io_wq *wq, cpumask_var_t mask);
int io_wq_max_workers(struct io_wq *wq, int *new_count);
int io_wq_fixed_workers(struct io_wq *wq, struct io_uring_fixed_worker_arg *count);
+int io_wq_destroy_fixed_workers(struct io_wq *wq);
static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
@@ -4416,6 +4416,45 @@ static __cold int io_register_iowq_fixed_workers(struct io_ring_ctx *ctx,
return ret;
}
+/*
+ * Handle IORING_UNREGISTER_IOWQ_FIXED_WORKERS: tear down the fixed
+ * io-wq workers belonging to the task (or SQPOLL thread) driving @ctx.
+ *
+ * Called with ctx->uring_lock held; may temporarily drop and re-take it
+ * on the SQPOLL path (see below).
+ *
+ * Returns the result of io_wq_destroy_fixed_workers(), or -EFAULT when
+ * no io_uring task context / io-wq could be resolved.
+ */
+static __cold int io_unregister_iowq_fixed_workers(struct io_ring_ctx *ctx)
+	__must_hold(&ctx->uring_lock)
+{
+	struct io_uring_task *tctx = NULL;
+	struct io_sq_data *sqd = NULL;
+	int ret;
+
+	if (ctx->flags & IORING_SETUP_SQPOLL) {
+		/* SQPOLL: the workers belong to the SQ thread, not current */
+		sqd = ctx->sq_data;
+		if (sqd) {
+			/*
+			 * Observe the correct sqd->lock -> ctx->uring_lock
+			 * ordering. Fine to drop uring_lock here, we hold
+			 * a ref to the ctx.
+			 */
+			refcount_inc(&sqd->refs);
+			mutex_unlock(&ctx->uring_lock);
+			mutex_lock(&sqd->lock);
+			mutex_lock(&ctx->uring_lock);
+			/* sqd->thread may have exited while unlocked */
+			if (sqd->thread)
+				tctx = sqd->thread->io_uring;
+		}
+	} else {
+		tctx = current->io_uring;
+	}
+
+	if (tctx && tctx->io_wq)
+		ret = io_wq_destroy_fixed_workers(tctx->io_wq);
+	else
+		ret = -EFAULT;
+
+	if (sqd) {
+		/* drop sqd->lock and the reference taken above */
+		mutex_unlock(&sqd->lock);
+		io_put_sq_data(sqd);
+	}
+
+	return ret;
+}
+
static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
void __user *arg, unsigned nr_args)
__releases(ctx->uring_lock)
@@ -4580,6 +4619,12 @@ static int __io_uring_register(struct io_ring_ctx *ctx, unsigned opcode,
break;
ret = io_register_iowq_fixed_workers(ctx, arg, nr_args);
break;
+ case IORING_UNREGISTER_IOWQ_FIXED_WORKERS:
+ ret = -EINVAL;
+ if (arg || nr_args)
+ break;
+ ret = io_unregister_iowq_fixed_workers(ctx);
+ break;
default:
ret = -EINVAL;
break;