@@ -324,16 +324,18 @@ pipe_read(struct kiocb *iocb, struct iov_iter *to)
 			}
 
 			if (!buf->len) {
+				bool wake;
 				pipe_buf_release(pipe, buf);
 				spin_lock_irq(&pipe->wait.lock);
 				tail++;
 				pipe->tail = tail;
 				do_wakeup = 1;
-				if (head - (tail - 1) == pipe->max_usage)
+				wake = head - (tail - 1) == pipe->max_usage / 2;
+				if (wake)
 					wake_up_interruptible_sync_poll_locked(
 						&pipe->wait, EPOLLOUT | EPOLLWRNORM);
 				spin_unlock_irq(&pipe->wait.lock);
-				if (head - (tail - 1) == pipe->max_usage)
+				if (wake)
 					kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
 			}
 			total_len -= chars;
Increase the threshold at which the reader sends a wake event to the
writers in the queue, such that the queue must be half empty before the
wake is issued rather than the wake being issued when just a single
slot is available.  This significantly reduces the number of context
switches in the tests, without altering the amount of work achieved.
With my pipe-bench program, there's a 20% reduction versus an unpatched
kernel.

Suggested-by: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
Signed-off-by: David Howells <dhowells@redhat.com>
---
 fs/pipe.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
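
A note on the arithmetic, for anyone following along: head and tail are
free-running unsigned cursors, so occupancy is head - tail, and
head - (tail - 1) is the occupancy just before the buffer released
above was popped.  Below is a minimal userspace sketch of the two
conditions; the ring size of 16 and the harness itself are illustrative
assumptions standing in for pipe->max_usage and the kernel code, not
part of the patch.

/*
 * Illustrative userspace sketch, not kernel code: drains a full ring
 * one buffer at a time and shows where the old and new wake conditions
 * fire.  The ring size of 16 is an assumed example value standing in
 * for pipe->max_usage; head and tail mimic pipe->head and pipe->tail.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	const unsigned int max_usage = 16;	/* assumed ring size */
	unsigned int head = 16, tail = 0;	/* start with a full ring */

	while (tail != head) {
		tail++;		/* reader pops one buffer */

		/* Old condition: the ring was full before this pop. */
		bool old_wake = head - (tail - 1) == max_usage;
		/* New condition: the ring just dropped to half full. */
		bool new_wake = head - (tail - 1) == max_usage / 2;

		printf("occupancy=%2u old_wake=%d new_wake=%d\n",
		       head - tail, old_wake, new_wake);
	}
	return 0;
}

The output shows the old check firing as soon as a single slot opens
(occupancy 15 of 16), while the new check waits until half the ring is
free (occupancy 7 of 16), so a woken writer can queue several buffers
per wakeup instead of one, which is where the saving in context
switches comes from.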