@@ -37,7 +37,7 @@ struct svc_pool {
struct list_head sp_sockets; /* pending sockets */
unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
- struct list_head sp_idle_threads; /* idle server threads */
+ struct llist_head sp_idle_threads; /* idle server threads */
/* statistics on pool operation */
struct percpu_counter sp_messages_arrived;
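For context: llist_head is the kernel's lock-free singly linked LIFO, where a
push is a single cmpxchg loop. That is what lets an idle thread enqueue itself
without taking pool->sp_lock. A minimal sketch, modelled on llist_add_batch()
in lib/llist.c (example_llist_push is an illustrative name, not a kernel
symbol):

#include <linux/llist.h>

/* Lock-free LIFO push, as llist_add() does it internally: retry the
 * cmpxchg until head->first swings from the value we read to our node.
 */
static bool example_llist_push(struct llist_node *node,
			       struct llist_head *head)
{
	struct llist_node *first = READ_ONCE(head->first);

	do {
		node->next = first;
	} while (!try_cmpxchg(&head->first, &first, node));

	return !first;	/* true if the list was empty before the push */
}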
@@ -186,7 +186,7 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp);
*/
struct svc_rqst {
struct list_head rq_all; /* all threads list */
- struct list_head rq_idle; /* On the idle list */
+ struct llist_node rq_idle; /* On the idle list */
struct rcu_head rq_rcu_head; /* for RCU deferred kfree */
struct svc_xprt * rq_xprt; /* transport ptr */
@@ -266,26 +266,15 @@ enum {
RQ_DATA, /* request has data */
};
-/**
- * svc_thread_set_busy - mark a thread as busy
- * @rqstp: the thread which is now busy
- *
- * If rq_idle is "empty", the thread must be busy.
- */
-static inline void svc_thread_set_busy(struct svc_rqst *rqstp)
-{
- INIT_LIST_HEAD(&rqstp->rq_idle);
-}
-
/**
* svc_thread_busy - check if a thread is busy
* @rqstp: the thread which might be busy
*
- * If rq_idle is "empty", the thread must be busy.
+ * A thread is only busy when it is not on the idle list.
*/
-static inline bool svc_thread_busy(struct svc_rqst *rqstp)
+static inline bool svc_thread_busy(const struct svc_rqst *rqstp)
{
- return list_empty(&rqstp->rq_idle);
+ return !llist_on_list(&rqstp->rq_idle);
}
#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
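The replacement busy test leans on the off-list convention behind
init_llist_node(): a node that is on no list points at itself, so no separate
"busy" flag (and no svc_thread_set_busy() helper) is needed. A sketch of that
convention, modelled on include/linux/llist.h (the example_* names are
illustrative):

/* An off-list node points at itself; any other next pointer means the
 * node is on some list.  This is the entire busy/idle encoding.
 */
static inline void example_init_node(struct llist_node *n)
{
	n->next = n;		/* marks "not on any list", i.e. busy */
}

static inline bool example_on_list(const struct llist_node *n)
{
	return n->next != n;	/* on a list, i.e. idle */
}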
@@ -510,7 +510,7 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
pool->sp_id = i;
INIT_LIST_HEAD(&pool->sp_sockets);
INIT_LIST_HEAD(&pool->sp_all_threads);
- INIT_LIST_HEAD(&pool->sp_idle_threads);
+ init_llist_head(&pool->sp_idle_threads);
spin_lock_init(&pool->sp_lock);
percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL);
@@ -642,7 +642,7 @@ svc_rqst_alloc(struct svc_serv *serv, struct svc_pool *pool, int node)
folio_batch_init(&rqstp->rq_fbatch);
- svc_thread_set_busy(rqstp);
+ init_llist_node(&rqstp->rq_idle);
rqstp->rq_server = serv;
rqstp->rq_pool = pool;
@@ -701,15 +701,15 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node)
void svc_pool_wake_idle_thread(struct svc_pool *pool)
{
struct svc_rqst *rqstp;
+ struct llist_node *ln;
rcu_read_lock();
spin_lock_bh(&pool->sp_lock);
- rqstp = list_first_entry_or_null(&pool->sp_idle_threads,
- struct svc_rqst, rq_idle);
- if (rqstp)
- list_del_init(&rqstp->rq_idle);
+ ln = llist_del_first_init(&pool->sp_idle_threads);
spin_unlock_bh(&pool->sp_lock);
- if (rqstp) {
+ if (ln) {
+ rqstp = llist_entry(ln, struct svc_rqst, rq_idle);
+
WRITE_ONCE(rqstp->rq_qtime, ktime_get());
wake_up_process(rqstp->rq_task);
rcu_read_unlock();
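Note the locking asymmetry this hunk relies on: llist_add() is safe from any
number of concurrent producers with no lock, but callers of llist_del_first()
(and hence llist_del_first_init()) must be serialized against each other,
which is why sp_lock is still taken around the pop. A hypothetical pairing
under those assumptions (example_* names are illustrative):

#include <linux/llist.h>
#include <linux/spinlock.h>

static void example_push(struct llist_head *head, struct llist_node *node)
{
	llist_add(node, head);			/* lock-free producer */
}

static struct llist_node *example_pop(struct llist_head *head,
				      spinlock_t *lock)
{
	struct llist_node *node;

	spin_lock_bh(lock);			/* serialize consumers */
	node = llist_del_first_init(head);	/* pop and mark off-list */
	spin_unlock_bh(lock);
	return node;
}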
@@ -715,10 +715,6 @@ rqst_should_sleep(struct svc_rqst *rqstp)
if (svc_thread_should_stop(rqstp))
return false;
- /* are we freezing? */
- if (freezing(current))
- return false;
-
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
if (svc_is_backchannel(rqstp)) {
if (!list_empty(&rqstp->rq_server->sv_cb_list))
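(The freezing() test removed above becomes unnecessary because the sleep in
svc_rqst_wait_for_work() now uses TASK_IDLE | TASK_FREEZABLE, letting the
freezer deal with the thread while it sleeps rather than requiring an explicit
check before sleeping.)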
@@ -734,30 +730,26 @@ static void svc_rqst_wait_for_work(struct svc_rqst *rqstp)
struct svc_pool *pool = rqstp->rq_pool;
if (rqst_should_sleep(rqstp)) {
- set_current_state(TASK_IDLE);
- spin_lock_bh(&pool->sp_lock);
- list_add(&rqstp->rq_idle, &pool->sp_idle_threads);
- spin_unlock_bh(&pool->sp_lock);
+ set_current_state(TASK_IDLE | TASK_FREEZABLE);
+ llist_add(&rqstp->rq_idle, &pool->sp_idle_threads);
+
+ if (unlikely(!rqst_should_sleep(rqstp)))
+ /* Work just became available. This thread cannot simply
+ * choose not to sleep as it *must* wait until removed.
+ * So wake the first waiter - whether it is this
+ * thread or some other, it will get the work done.
+ */
+ svc_pool_wake_idle_thread(pool);
- /* Need to check should_sleep() again after
- * setting task state in case a wakeup happened
- * between testing and setting.
+ /* Since a thread cannot remove itself from an llist,
+ * schedule until someone else removes @rqstp from
+ * the idle list.
*/
- if (rqst_should_sleep(rqstp)) {
+ while (!svc_thread_busy(rqstp)) {
schedule();
- } else {
- __set_current_state(TASK_RUNNING);
- cond_resched();
- }
-
- /* We *must* be removed from the list before we can continue.
- * If we were woken, this is already done
- */
- if (!svc_thread_busy(rqstp)) {
- spin_lock_bh(&pool->sp_lock);
- list_del_init(&rqstp->rq_idle);
- spin_unlock_bh(&pool->sp_lock);
+ set_current_state(TASK_IDLE | TASK_FREEZABLE);
}
+ __set_current_state(TASK_RUNNING);
} else {
cond_resched();
}
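Two points make this loop correct. First, rechecking rqst_should_sleep() after
llist_add() closes the window in which work arrived after the first check but
before the thread was visible on the idle list. Second, since only a waker may
remove the node, the thread keeps calling schedule() until svc_thread_busy()
reports that someone has dequeued it. An illustrative timeline of the race the
recheck closes:

/*
 *   server thread                        waker
 *   -------------                        -----
 *   rqst_should_sleep() -> true
 *                                        queues work; finds the idle
 *                                        list empty, so wakes nobody
 *   llist_add(&rqstp->rq_idle, ...)
 *   rqst_should_sleep() -> false
 *   svc_pool_wake_idle_thread()
 *     -> pops and wakes an idle thread
 *        (possibly rqstp itself), which
 *        then picks up the pending work
 */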
@@ -870,9 +862,10 @@ void svc_recv(struct svc_rqst *rqstp)
struct svc_xprt *xprt = rqstp->rq_xprt;
/* Normally we will wait up to 5 seconds for any required
- * cache information to be provided.
+ * cache information to be provided. When there are no
+ * idle threads, we reduce the wait time.
*/
- if (!list_empty(&pool->sp_idle_threads))
+ if (pool->sp_idle_threads.first)
rqstp->rq_chandle.thread_wait = 5 * HZ;
else
rqstp->rq_chandle.thread_wait = 1 * HZ;
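(Reading pool->sp_idle_threads.first without sp_lock is a deliberately racy
peek: the result only chooses between a 5 second and a 1 second cache wait, so
a stale answer is harmless.)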