@@ -403,12 +403,18 @@ struct anon_vma_name {
 struct vm_area_struct {
 	/* The first cache line has the info for VMA tree walking. */
 
-	unsigned long vm_start;		/* Our start address within vm_mm. */
-	unsigned long vm_end;		/* The first byte after our end address
-					   within vm_mm. */
-
-	/* linked list of VM areas per task, sorted by address */
-	struct vm_area_struct *vm_next, *vm_prev;
+	union {
+		struct {
+			/* VMA covers [vm_start; vm_end) addresses within mm */
+			unsigned long vm_start, vm_end;
+
+			/* linked list of VMAs per task, sorted by address */
+			struct vm_area_struct *vm_next, *vm_prev;
+		};
+#ifdef CONFIG_PER_VMA_LOCK
+		struct rcu_head vm_rcu;	/* Used for deferred freeing. */
+#endif
+	};
 
 	struct rb_node vm_rb;
 
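The union overlays vm_rcu on top of fields that only matter while the VMA is
reachable: once a VMA has been unlinked and is on its way to destruction,
vm_start/vm_end and the list pointers are dead storage, so the rcu_head can
borrow their bytes and the structure does not grow. Below is a minimal
userspace sketch of the same overlay-plus-container_of() pattern; every name
in it (fake_rcu_head, struct object, object_free) is invented for
illustration, none of it is kernel code:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Same trick as the kernel's container_of(): map a pointer to an
 * embedded member back to the enclosing structure. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_rcu_head {			/* stand-in for struct rcu_head */
	void (*func)(struct fake_rcu_head *head);
};

struct object {
	union {
		struct {		/* valid only while the object is live */
			unsigned long start, end;
		};
		struct fake_rcu_head rcu; /* reuses the bytes once dead */
	};
	int id;				/* storage outside the union is untouched */
};

static void object_free(struct fake_rcu_head *head)
{
	struct object *obj = container_of(head, struct object, rcu);

	printf("freeing object %d\n", obj->id);
	free(obj);
}

int main(void)
{
	struct object *obj = malloc(sizeof(*obj));

	obj->start = 0x1000;
	obj->end = 0x2000;
	obj->id = 42;

	/*
	 * Once no reader can still see the object, the union fields are
	 * dead and the callback head may overwrite them; a real
	 * call_rcu() would queue this callback instead of running it
	 * synchronously.
	 */
	obj->rcu.func = object_free;
	obj->rcu.func(&obj->rcu);
	return 0;
}

The payoff of the overlay is that deferred freeing costs no extra memory in a
structure a busy system may hold hundreds of thousands of instances of.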
@@ -481,10 +481,23 @@ struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
 	return new;
 }
 
+#ifdef CONFIG_PER_VMA_LOCK
+static void __vm_area_free(struct rcu_head *head)
+{
+	struct vm_area_struct *vma = container_of(head, struct vm_area_struct,
+						  vm_rcu);
+	kmem_cache_free(vm_area_cachep, vma);
+}
+#endif
+
 void vm_area_free(struct vm_area_struct *vma)
 {
 	free_anon_vma_name(vma);
+#ifdef CONFIG_PER_VMA_LOCK
+	call_rcu(&vma->vm_rcu, __vm_area_free);
+#else
 	kmem_cache_free(vm_area_cachep, vma);
+#endif
 }
 
 static void account_kernel_stack(struct task_struct *tsk, int account)
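The reason for deferring the free only becomes visible in the rest of the
series: with CONFIG_PER_VMA_LOCK the goal is to let page-fault handlers find
and use VMAs without holding mmap_lock, so a reader may hold a pointer to a
VMA that a racing munmap() has already removed from the tree. call_rcu()
guarantees the slab object is not recycled until every such reader has left
its RCU read-side critical section. A sketch of the reader pattern this
enables follows; find_vma_rcu() is a hypothetical lookup helper used purely
for illustration, not something this patch provides:

/*
 * Illustrative only: assumes a hypothetical RCU-safe lookup helper,
 * find_vma_rcu(), which this patch does not add.
 */
static bool addr_is_anon_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool ret = false;

	rcu_read_lock();
	vma = find_vma_rcu(mm, addr);	/* may race with an unmap */
	if (vma) {
		/*
		 * Even if the VMA was just removed, vm_area_free() only
		 * queued it with call_rcu(), so this dereference cannot
		 * touch recycled memory before rcu_read_unlock().
		 */
		ret = vma->anon_vma != NULL;
	}
	rcu_read_unlock();

	return ret;
}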