@@ -3845,6 +3845,7 @@ attach_rcu_head_to_object(void *obj)
void kvfree_call_rcu(struct rcu_head *head, void *ptr)
{
unsigned long flags;
+ struct rcu_gp_oldstate old_snap;
struct kfree_rcu_cpu *krcp;
bool success;
@@ -3855,8 +3856,10 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
* only. For other places please embed an rcu_head to
* your data.
*/
- if (!head)
+ if (!head) {
might_sleep();
+ start_poll_synchronize_rcu_full(&old_snap);
+ }
// Queue the object but don't yet schedule the batch.
if (debug_rcu_head_queue(ptr)) {
@@ -3917,7 +3920,10 @@ void kvfree_call_rcu(struct rcu_head *head, void *ptr)
*/
if (!success) {
debug_rcu_head_unqueue((struct rcu_head *) ptr);
- synchronize_rcu();
+
+ if (!poll_state_synchronize_rcu_full(&old_snap))
+ synchronize_rcu();
+
kvfree(ptr);
}
}
For a single argument, use the polled grace-period API to check
whether a GP has already elapsed. This allows an extra GP request
to be bypassed on the slow path. Allocating a page or a dynamic
rcu_head might take some time and still fail; in that scenario, a
GP requested on entry to kvfree_call_rcu() may already have
elapsed, so take advantage of it.

Signed-off-by: Uladzislau Rezki (Sony) <urezki@gmail.com>
---
 kernel/rcu/tree.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)
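
For readers unfamiliar with the polled grace-period API, below is a
minimal self-contained sketch, not part of this patch, of the pattern
the slow path now uses: snapshot the GP state up front, do the slow
work, and only fall back to a blocking synchronize_rcu() if no full
grace period elapsed in the meantime. The helper free_object_after_gp()
and its surrounding context are hypothetical, made up for illustration:

        #include <linux/rcupdate.h>
        #include <linux/slab.h>

        /* Hypothetical helper showing the polled-GP slow-path pattern. */
        static void free_object_after_gp(void *ptr)
        {
                struct rcu_gp_oldstate snap;

                /* Record the current GP state; request a new GP if needed. */
                start_poll_synchronize_rcu_full(&snap);

                /* ... potentially slow work, e.g. failed allocation attempts ... */

                /* Skip the blocking wait if a full GP has already elapsed. */
                if (!poll_state_synchronize_rcu_full(&snap))
                        synchronize_rcu();

                kvfree(ptr); /* Safe: all pre-existing readers have finished. */
        }

The design point is that the snapshot is taken before the potentially
slow allocation path, so any grace period that completes while the
allocation is being attempted is credited at no extra cost.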