@@ -2877,11 +2877,13 @@ EXPORT_SYMBOL_GPL(call_rcu);
/**
* struct kvfree_rcu_bulk_data - single block to store kvfree_rcu() pointers
* @list: List node. All blocks are linked between each other
+ * @gp_snap: Snapshot of RCU state for the objects placed in this block
* @nr_records: Number of active pointers in the array
* @records: Array of the kvfree_rcu() pointers
*/
struct kvfree_rcu_bulk_data {
struct list_head list;
+ unsigned long gp_snap;
unsigned long nr_records;
void *records[];
};
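For context, @gp_snap holds an opaque cookie from RCU's polled grace-period API (get_state_synchronize_rcu() / poll_state_synchronize_rcu()). A minimal sketch of that cookie's lifecycle follows; struct deferred_obj and its helpers are hypothetical and only illustrate the pattern, they are not part of this patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical object type used only to illustrate the cookie lifecycle. */
struct deferred_obj {
        void *ptr;
        unsigned long gp_snap;
};

static void deferred_obj_queue(struct deferred_obj *d, void *ptr)
{
        d->ptr = ptr;
        /* Record the current RCU state as an opaque cookie. */
        d->gp_snap = get_state_synchronize_rcu();
}

static bool deferred_obj_try_free(struct deferred_obj *d)
{
        /* True only once a full grace period has elapsed past the cookie. */
        if (!poll_state_synchronize_rcu(d->gp_snap))
                return false;

        kvfree(d->ptr);
        return true;
}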
@@ -2898,13 +2900,15 @@ struct kvfree_rcu_bulk_data {
* struct kfree_rcu_cpu_work - single batch of kfree_rcu() requests
* @rcu_work: Let queue_rcu_work() invoke workqueue handler after grace period
* @head_free: List of kfree_rcu() objects waiting for a grace period
+ * @head_free_gp_snap: Snapshot of RCU state for the objects placed on @head_free
* @bulk_head_free: Bulk-List of kvfree_rcu() objects waiting for a grace period
* @krcp: Pointer to @kfree_rcu_cpu structure
*/
struct kfree_rcu_cpu_work {
- struct rcu_work rcu_work;
+ struct work_struct rcu_work;
struct rcu_head *head_free;
+ unsigned long head_free_gp_snap;
struct list_head bulk_head_free[FREE_N_CHANNELS];
struct kfree_rcu_cpu *krcp;
};
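The rcu_work to work_struct switch above is the core of this change: queue_rcu_work() always interposes a full grace period (via call_rcu()) before the handler runs, whereas a plain work item runs as soon as a worker picks it up and can consult the recorded snapshots itself. A sketch of the two queueing patterns; only queue_rcu_work(), queue_work() and system_wq are real APIs here, the wrappers are hypothetical:

#include <linux/workqueue.h>

/*
 * Old pattern: queue_rcu_work() does call_rcu() internally, so the
 * handler is always delayed by at least one full grace period, even
 * when one has already elapsed for every queued object.
 */
static void queue_old_style(struct rcu_work *rwork)
{
        queue_rcu_work(system_wq, rwork);
}

/*
 * New pattern: queue immediately; the handler polls the recorded
 * snapshots and blocks only for grace periods still outstanding.
 */
static void queue_new_style(struct work_struct *work)
{
        queue_work(system_wq, work);
}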
@@ -3100,10 +3104,11 @@ static void kfree_rcu_work(struct work_struct *work)
struct rcu_head *head;
struct kfree_rcu_cpu *krcp;
struct kfree_rcu_cpu_work *krwp;
+ unsigned long head_free_gp_snap;
int i;
- krwp = container_of(to_rcu_work(work),
- struct kfree_rcu_cpu_work, rcu_work);
+ krwp = container_of(work,
+ struct kfree_rcu_cpu_work, rcu_work);
krcp = krwp->krcp;
raw_spin_lock_irqsave(&krcp->lock, flags);
@@ -3114,12 +3119,29 @@ static void kfree_rcu_work(struct work_struct *work)
// Channel 3.
head = krwp->head_free;
krwp->head_free = NULL;
+ head_free_gp_snap = krwp->head_free_gp_snap;
raw_spin_unlock_irqrestore(&krcp->lock, flags);
// Handle the first two channels.
- for (i = 0; i < FREE_N_CHANNELS; i++)
+ for (i = 0; i < FREE_N_CHANNELS; i++) {
+ // Start from the tail page, since a GP has most likely already elapsed for it.
+ list_for_each_entry_safe_reverse(bnode, n, &bulk_head[i], list) {
+ // Not yet ready? Bail out since we need one more GP.
+ if (!poll_state_synchronize_rcu(bnode->gp_snap))
+ break;
+
+ list_del_init(&bnode->list);
+ kvfree_rcu_bulk(krcp, bnode, i);
+ }
+
+ // Note that a request for one extra GP can occur
+ // at most once for all objects in this batch.
+ if (!list_empty(&bulk_head[i]))
+ synchronize_rcu();
+
list_for_each_entry_safe(bnode, n, &bulk_head[i], list)
kvfree_rcu_bulk(krcp, bnode, i);
+ }
/*
* This is used when the "bulk" path can not be used for the
@@ -3128,7 +3150,10 @@ static void kfree_rcu_work(struct work_struct *work)
* queued on a linked list through their rcu_head structures.
* This list is named "Channel 3".
*/
- kvfree_rcu_list(head);
+ if (head) {
+ cond_synchronize_rcu(head_free_gp_snap);
+ kvfree_rcu_list(head);
+ }
}
static bool
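Two details in the reworked handler are worth spelling out. Blocks are list_add()ed at the head, so the tail holds the oldest snapshot; walking in reverse frees the likely-expired blocks first and stops at the first block still waiting, after which a single synchronize_rcu() suffices for everything left. For channel 3, cond_synchronize_rcu() blocks only when the snapshot's grace period has not yet completed; a hypothetical helper making that semantic explicit:

#include <linux/rcupdate.h>

/*
 * Hypothetical helper, equivalent in effect to
 * cond_synchronize_rcu(snap): return immediately when a full grace
 * period has already elapsed past @snap, otherwise block for one.
 */
static void wait_for_gp_since(unsigned long snap)
{
        if (!poll_state_synchronize_rcu(snap))
                synchronize_rcu();
}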
@@ -3195,6 +3220,11 @@ static void kfree_rcu_monitor(struct work_struct *work)
if (!krwp->head_free) {
krwp->head_free = krcp->head;
WRITE_ONCE(krcp->head, NULL);
+
+ // Take a snapshot for this krwp. No further objects
+ // can be added to the attached head_free channel, so
+ // a single GP snapshot taken here covers them all.
+ krwp->head_free_gp_snap = get_state_synchronize_rcu();
}
WRITE_ONCE(krcp->count, 0);
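Taking the snapshot immediately after the detach is what makes a single cookie sufficient: once krcp->head is NULLed under the lock, no further objects can join the head_free list, so a grace period elapsing past this one snapshot covers every entry. A sketch of that detach-then-snapshot step, with hypothetical names and the locking omitted:

#include <linux/rcupdate.h>
#include <linux/types.h>

/* Hypothetical detach helper; the real code runs under krcp->lock. */
static struct rcu_head *detach_and_snapshot(struct rcu_head **headp,
                                            unsigned long *snap)
{
        struct rcu_head *list = *headp;

        *headp = NULL;
        /*
         * The list is now private, so no later object can slip in; a
         * single cookie taken here is valid for all of its entries.
         */
        *snap = get_state_synchronize_rcu();
        return list;
}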
@@ -3204,7 +3234,7 @@ static void kfree_rcu_monitor(struct work_struct *work)
// be that the work is in the pending state when
// channels have been detached following by each
// other.
- queue_rcu_work(system_wq, &krwp->rcu_work);
+ queue_work(system_wq, &krwp->rcu_work);
}
}
@@ -3332,8 +3362,9 @@ add_ptr_to_bulk_krc_lock(struct kfree_rcu_cpu **krcp,
list_add(&bnode->list, &(*krcp)->bulk_head[idx]);
}
- /* Finally insert. */
+ // Finally insert and update the GP snapshot for this page.
bnode->records[bnode->nr_records++] = ptr;
+ bnode->gp_snap = get_state_synchronize_rcu();
return true;
}
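Refreshing bnode->gp_snap on every insertion pins the cookie to the newest record in the block, so one successful poll proves a grace period has elapsed for every older record as well. A self-contained sketch of that invariant; struct mini_block and its helpers are hypothetical:

#include <linux/kernel.h>
#include <linux/rcupdate.h>

/* Hypothetical miniature of the kvfree_rcu_bulk_data snapshot invariant. */
struct mini_block {
        unsigned long gp_snap;
        unsigned long nr;
        void *recs[16];
};

static bool mini_block_add(struct mini_block *b, void *ptr)
{
        if (b->nr >= ARRAY_SIZE(b->recs))
                return false;

        b->recs[b->nr++] = ptr;
        /*
         * Track the newest record: a grace period that elapses past
         * this snapshot has also elapsed for all older records.
         */
        b->gp_snap = get_state_synchronize_rcu();
        return true;
}

static bool mini_block_ready(struct mini_block *b)
{
        /* Safe to free every record once this returns true. */
        return poll_state_synchronize_rcu(b->gp_snap);
}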
@@ -4783,7 +4814,7 @@ static void __init kfree_rcu_batch_init(void)
struct kfree_rcu_cpu *krcp = per_cpu_ptr(&krc, cpu);
for (i = 0; i < KFREE_N_BATCHES; i++) {
- INIT_RCU_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
+ INIT_WORK(&krcp->krw_arr[i].rcu_work, kfree_rcu_work);
krcp->krw_arr[i].krcp = krcp;
for (j = 0; j < FREE_N_CHANNELS; j++)