
[v3,7/8] accel/tcg: Move PageDesc tree into tb-maint.c for system

Message ID 20221209051914.398215-8-richard.henderson@linaro.org (mailing list archive)
State New, archived
Series accel/tcg: Rewrite user-only vma tracking

Commit Message

Richard Henderson Dec. 9, 2022, 5:19 a.m. UTC
Now that PageDesc is not used for user-only, and for system
it is only used for tb maintenance, move the implementation
into tb-maint.c appropriately ifdefed.

We have not yet eliminated all references to PageDesc for
user-only, so retain a typedef to the structure without definition.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/internal.h      |  49 +++-----------
 accel/tcg/tb-maint.c      | 130 ++++++++++++++++++++++++++++++++++++--
 accel/tcg/translate-all.c |  95 ----------------------------
 3 files changed, 134 insertions(+), 140 deletions(-)

Comments

Philippe Mathieu-Daudé Dec. 9, 2022, 7:22 a.m. UTC | #1
On 9/12/22 06:19, Richard Henderson wrote:
> Now that PageDesc is not used for user-only, and for system
> it is only used for tb maintenance, move the implementation
> into tb-maint.c appropriately ifdefed.
> 
> We have not yet eliminated all references to PageDesc for
> user-only, so retain a typedef to the structure without definition.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   accel/tcg/internal.h      |  49 +++-----------
>   accel/tcg/tb-maint.c      | 130 ++++++++++++++++++++++++++++++++++++--
>   accel/tcg/translate-all.c |  95 ----------------------------
>   3 files changed, 134 insertions(+), 140 deletions(-)


> -/*
> - * In system mode we want L1_MAP to be based on ram offsets,
> - * while in user mode we want it to be based on virtual addresses.
> - *
> - * TODO: For user mode, see the caveat re host vs guest virtual
> - * address spaces near GUEST_ADDR_MAX.
> - */
> -#if !defined(CONFIG_USER_ONLY)
> -#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
> -# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
> -#else
> -# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
> -#endif
> -#else
> -# define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
> -#endif


> diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
> index 20e86c813d..9b996bbeb2 100644
> --- a/accel/tcg/tb-maint.c
> +++ b/accel/tcg/tb-maint.c
> @@ -127,6 +127,121 @@ static PageForEachNext foreach_tb_next(PageForEachNext tb,
>   }
>   
>   #else
> +/*
> + * In system mode we want L1_MAP to be based on ram offsets.
> + */
> +#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
> +# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
> +#else
> +# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
> +#endif
So you removed L1_MAP_ADDR_SPACE_BITS in this patch. If you ever respin,
I'd rather have it cleaned in the previous patch, along with the comment
updated and TODO removed.
Philippe Mathieu-Daudé Dec. 9, 2022, 9:28 a.m. UTC | #2
On 9/12/22 06:19, Richard Henderson wrote:
> Now that PageDesc is not used for user-only, and for system
> it is only used for tb maintenance, move the implementation
> into tb-maint.c appropriately ifdefed.
> 
> We have not yet eliminated all references to PageDesc for
> user-only, so retain a typedef to the structure without definition.
> 
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>   accel/tcg/internal.h      |  49 +++-----------
>   accel/tcg/tb-maint.c      | 130 ++++++++++++++++++++++++++++++++++++--
>   accel/tcg/translate-all.c |  95 ----------------------------
>   3 files changed, 134 insertions(+), 140 deletions(-)


> diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
> index 20e86c813d..9b996bbeb2 100644
> --- a/accel/tcg/tb-maint.c
> +++ b/accel/tcg/tb-maint.c

Expanding diff context:

     #ifdef CONFIG_USER_ONLY

     ...

>   #else


> +PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
> +{
> +    PageDesc *pd;
> +    void **lp;
> +    int i;
> +
> +    /* Level 1.  Always allocated.  */
> +    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
> +
> +    /* Level 2..N-1.  */
> +    for (i = v_l2_levels; i > 0; i--) {
> +        void **p = qatomic_rcu_read(lp);
> +
> +        if (p == NULL) {
> +            void *existing;
> +
> +            if (!alloc) {
> +                return NULL;
> +            }
> +            p = g_new0(void *, V_L2_SIZE);
> +            existing = qatomic_cmpxchg(lp, NULL, p);
> +            if (unlikely(existing)) {
> +                g_free(p);
> +                p = existing;
> +            }
> +        }
> +
> +        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
> +    }
> +
> +    pd = qatomic_rcu_read(lp);
> +    if (pd == NULL) {
> +        void *existing;

            int i;

> +
> +        if (!alloc) {
> +            return NULL;
> +        }
> +        pd = g_new0(PageDesc, V_L2_SIZE);
> +#ifndef CONFIG_USER_ONLY

CONFIG_USER_ONLY is never defined here, so this block can be simplified.

> +        {
> +            int i;
> +
> +            for (i = 0; i < V_L2_SIZE; i++) {
> +                qemu_spin_init(&pd[i].lock);
> +            }
> +        }
> +#endif
> +        existing = qatomic_cmpxchg(lp, NULL, pd);
> +        if (unlikely(existing)) {
> +#ifndef CONFIG_USER_ONLY
> +            {

Ditto.

> +                int i;
> +
> +                for (i = 0; i < V_L2_SIZE; i++) {
> +                    qemu_spin_destroy(&pd[i].lock);
> +                }
> +            }
> +#endif
> +            g_free(pd);
> +            pd = existing;
> +        }
> +    }
> +
> +    return pd + (index & (V_L2_SIZE - 1));
> +}
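
(For illustration only: a sketch of how the leaf allocation could read with the
#ifndef CONFIG_USER_ONLY wrappers dropped and the index declared per loop, as
suggested above. This is not part of the posted patch.)

    pd = g_new0(PageDesc, V_L2_SIZE);
    /* Initialize the per-page lock of every entry in the new leaf array. */
    for (int i = 0; i < V_L2_SIZE; i++) {
        qemu_spin_init(&pd[i].lock);
    }
    existing = qatomic_cmpxchg(lp, NULL, pd);
    if (unlikely(existing)) {
        /* Lost the race: release our copy and use the winner's array. */
        for (int i = 0; i < V_L2_SIZE; i++) {
            qemu_spin_destroy(&pd[i].lock);
        }
        g_free(pd);
        pd = existing;
    }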
Richard Henderson Dec. 12, 2022, 3:28 p.m. UTC | #3
On 12/9/22 01:22, Philippe Mathieu-Daudé wrote:
> On 9/12/22 06:19, Richard Henderson wrote:
>> Now that PageDesc is not used for user-only, and for system
>> it is only used for tb maintenance, move the implementation
>> into tb-maint.c appropriately ifdefed.
>>
>> We have not yet eliminated all references to PageDesc for
>> user-only, so retain a typedef to the structure without definition.
>>
>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>>   accel/tcg/internal.h      |  49 +++-----------
>>   accel/tcg/tb-maint.c      | 130 ++++++++++++++++++++++++++++++++++++--
>>   accel/tcg/translate-all.c |  95 ----------------------------
>>   3 files changed, 134 insertions(+), 140 deletions(-)
> 
> 
>> -/*
>> - * In system mode we want L1_MAP to be based on ram offsets,
>> - * while in user mode we want it to be based on virtual addresses.
>> - *
>> - * TODO: For user mode, see the caveat re host vs guest virtual
>> - * address spaces near GUEST_ADDR_MAX.
>> - */
>> -#if !defined(CONFIG_USER_ONLY)
>> -#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
>> -# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
>> -#else
>> -# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
>> -#endif
>> -#else
>> -# define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
>> -#endif
> 
> 
>> diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
>> index 20e86c813d..9b996bbeb2 100644
>> --- a/accel/tcg/tb-maint.c
>> +++ b/accel/tcg/tb-maint.c
>> @@ -127,6 +127,121 @@ static PageForEachNext foreach_tb_next(PageForEachNext tb,
>>   }
>>   #else
>> +/*
>> + * In system mode we want L1_MAP to be based on ram offsets.
>> + */
>> +#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
>> +# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
>> +#else
>> +# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
>> +#endif
> So you removed L1_MAP_ADDR_SPACE_BITS in this patch. If you ever respin,
> I'd rather have it cleaned in the previous patch, along with the comment
> updated and TODO removed.

I don't agree.  I move all of the PageDesc symbols together in this patch.  I think that 
it would get in the way of the main point of the previous patch.


r~
Philippe Mathieu-Daudé Dec. 12, 2022, 6:19 p.m. UTC | #4
On 12/12/22 16:28, Richard Henderson wrote:
> On 12/9/22 01:22, Philippe Mathieu-Daudé wrote:
>> On 9/12/22 06:19, Richard Henderson wrote:
>>> Now that PageDesc is not used for user-only, and for system
>>> it is only used for tb maintenance, move the implementation
>>> into tb-maint.c appropriately ifdefed.
>>>
>>> We have not yet eliminated all references to PageDesc for
>>> user-only, so retain a typedef to the structure without definition.
>>>
>>> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>>> ---
>>>   accel/tcg/internal.h      |  49 +++-----------
>>>   accel/tcg/tb-maint.c      | 130 ++++++++++++++++++++++++++++++++++++--
>>>   accel/tcg/translate-all.c |  95 ----------------------------
>>>   3 files changed, 134 insertions(+), 140 deletions(-)
>>
>>
>>> -/*
>>> - * In system mode we want L1_MAP to be based on ram offsets,
>>> - * while in user mode we want it to be based on virtual addresses.
>>> - *
>>> - * TODO: For user mode, see the caveat re host vs guest virtual
>>> - * address spaces near GUEST_ADDR_MAX.
>>> - */
>>> -#if !defined(CONFIG_USER_ONLY)
>>> -#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
>>> -# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
>>> -#else
>>> -# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
>>> -#endif
>>> -#else
>>> -# define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
>>> -#endif
>>
>>
>>> diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
>>> index 20e86c813d..9b996bbeb2 100644
>>> --- a/accel/tcg/tb-maint.c
>>> +++ b/accel/tcg/tb-maint.c
>>> @@ -127,6 +127,121 @@  static PageForEachNext foreach_tb_next(PageForEachNext tb,
>>>   }
>>>   #else
>>> +/*
>>> + * In system mode we want L1_MAP to be based on ram offsets.
>>> + */
>>> +#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
>>> +# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
>>> +#else
>>> +# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
>>> +#endif
>> So you removed L1_MAP_ADDR_SPACE_BITS in this patch. If you ever respin,
>> I'd rather have it cleaned in the previous patch, along with the comment
>> updated and TODO removed.
> 
> I don't agree.  I move all of the PageDesc symbols together in this 
> patch.  I think that it would get in the way of the main point of the 
> previous patch.

OK then :)

Patch

diff --git a/accel/tcg/internal.h b/accel/tcg/internal.h
index be19bdf088..14b89c4ee8 100644
--- a/accel/tcg/internal.h
+++ b/accel/tcg/internal.h
@@ -23,51 +23,13 @@ 
 #define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
 #endif
 
-typedef struct PageDesc {
+typedef struct PageDesc PageDesc;
 #ifndef CONFIG_USER_ONLY
+struct PageDesc {
     QemuSpin lock;
     /* list of TBs intersecting this ram page */
     uintptr_t first_tb;
-#endif
-} PageDesc;
-
-/*
- * In system mode we want L1_MAP to be based on ram offsets,
- * while in user mode we want it to be based on virtual addresses.
- *
- * TODO: For user mode, see the caveat re host vs guest virtual
- * address spaces near GUEST_ADDR_MAX.
- */
-#if !defined(CONFIG_USER_ONLY)
-#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
-# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
-#else
-# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
-#endif
-#else
-# define L1_MAP_ADDR_SPACE_BITS  MIN(HOST_LONG_BITS, TARGET_ABI_BITS)
-#endif
-
-/* Size of the L2 (and L3, etc) page tables.  */
-#define V_L2_BITS 10
-#define V_L2_SIZE (1 << V_L2_BITS)
-
-/*
- * L1 Mapping properties
- */
-extern int v_l1_size;
-extern int v_l1_shift;
-extern int v_l2_levels;
-
-/*
- * The bottom level has pointers to PageDesc, and is indexed by
- * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
- */
-#define V_L1_MIN_BITS 4
-#define V_L1_MAX_BITS (V_L2_BITS + 3)
-#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
-
-extern void *l1_map[V_L1_MAX_SIZE];
+};
 
 PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc);
 
@@ -76,6 +38,11 @@  static inline PageDesc *page_find(tb_page_addr_t index)
     return page_find_alloc(index, false);
 }
 
+void page_table_config_init(void);
+#else
+static inline void page_table_config_init(void) { }
+#endif
+
 /* list iterators for lists of tagged pointers in TranslationBlock */
 #define TB_FOR_EACH_TAGGED(head, tb, n, field)                          \
     for (n = (head) & 1, tb = (TranslationBlock *)((head) & ~1);        \
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index 20e86c813d..9b996bbeb2 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -127,6 +127,121 @@  static PageForEachNext foreach_tb_next(PageForEachNext tb,
 }
 
 #else
+/*
+ * In system mode we want L1_MAP to be based on ram offsets.
+ */
+#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
+# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
+#else
+# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
+#endif
+
+/* Size of the L2 (and L3, etc) page tables.  */
+#define V_L2_BITS 10
+#define V_L2_SIZE (1 << V_L2_BITS)
+
+/*
+ * L1 Mapping properties
+ */
+static int v_l1_size;
+static int v_l1_shift;
+static int v_l2_levels;
+
+/*
+ * The bottom level has pointers to PageDesc, and is indexed by
+ * anything from 4 to (V_L2_BITS + 3) bits, depending on target page size.
+ */
+#define V_L1_MIN_BITS 4
+#define V_L1_MAX_BITS (V_L2_BITS + 3)
+#define V_L1_MAX_SIZE (1 << V_L1_MAX_BITS)
+
+static void *l1_map[V_L1_MAX_SIZE];
+
+void page_table_config_init(void)
+{
+    uint32_t v_l1_bits;
+
+    assert(TARGET_PAGE_BITS);
+    /* The bits remaining after N lower levels of page tables.  */
+    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
+    if (v_l1_bits < V_L1_MIN_BITS) {
+        v_l1_bits += V_L2_BITS;
+    }
+
+    v_l1_size = 1 << v_l1_bits;
+    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
+    v_l2_levels = v_l1_shift / V_L2_BITS - 1;
+
+    assert(v_l1_bits <= V_L1_MAX_BITS);
+    assert(v_l1_shift % V_L2_BITS == 0);
+    assert(v_l2_levels >= 0);
+}
+
+PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
+{
+    PageDesc *pd;
+    void **lp;
+    int i;
+
+    /* Level 1.  Always allocated.  */
+    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
+
+    /* Level 2..N-1.  */
+    for (i = v_l2_levels; i > 0; i--) {
+        void **p = qatomic_rcu_read(lp);
+
+        if (p == NULL) {
+            void *existing;
+
+            if (!alloc) {
+                return NULL;
+            }
+            p = g_new0(void *, V_L2_SIZE);
+            existing = qatomic_cmpxchg(lp, NULL, p);
+            if (unlikely(existing)) {
+                g_free(p);
+                p = existing;
+            }
+        }
+
+        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
+    }
+
+    pd = qatomic_rcu_read(lp);
+    if (pd == NULL) {
+        void *existing;
+
+        if (!alloc) {
+            return NULL;
+        }
+        pd = g_new0(PageDesc, V_L2_SIZE);
+#ifndef CONFIG_USER_ONLY
+        {
+            int i;
+
+            for (i = 0; i < V_L2_SIZE; i++) {
+                qemu_spin_init(&pd[i].lock);
+            }
+        }
+#endif
+        existing = qatomic_cmpxchg(lp, NULL, pd);
+        if (unlikely(existing)) {
+#ifndef CONFIG_USER_ONLY
+            {
+                int i;
+
+                for (i = 0; i < V_L2_SIZE; i++) {
+                    qemu_spin_destroy(&pd[i].lock);
+                }
+            }
+#endif
+            g_free(pd);
+            pd = existing;
+        }
+    }
+
+    return pd + (index & (V_L2_SIZE - 1));
+}
 
 /* Set to NULL all the 'first_tb' fields in all PageDescs. */
 static void tb_remove_all_1(int level, void **lp)
@@ -420,6 +535,17 @@  static void tb_phys_invalidate__locked(TranslationBlock *tb)
     qemu_thread_jit_execute();
 }
 
+#ifdef CONFIG_USER_ONLY
+static inline void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
+                                  PageDesc **ret_p2, tb_page_addr_t phys2,
+                                  bool alloc)
+{
+    *ret_p1 = NULL;
+    *ret_p2 = NULL;
+}
+static inline void page_lock_tb(const TranslationBlock *tb) { }
+static inline void page_unlock_tb(const TranslationBlock *tb) { }
+#else
 static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
                            PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
 {
@@ -460,10 +586,6 @@  static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
     }
 }
 
-#ifdef CONFIG_USER_ONLY
-static inline void page_lock_tb(const TranslationBlock *tb) { }
-static inline void page_unlock_tb(const TranslationBlock *tb) { }
-#else
 /* lock the page(s) of a TB in the correct acquisition order */
 static void page_lock_tb(const TranslationBlock *tb)
 {
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 0d7596fcb8..90787bc04f 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -114,37 +114,8 @@  QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
                   sizeof_field(TranslationBlock, trace_vcpu_dstate)
                   * BITS_PER_BYTE);
 
-/*
- * L1 Mapping properties
- */
-int v_l1_size;
-int v_l1_shift;
-int v_l2_levels;
-
-void *l1_map[V_L1_MAX_SIZE];
-
 TBContext tb_ctx;
 
-static void page_table_config_init(void)
-{
-    uint32_t v_l1_bits;
-
-    assert(TARGET_PAGE_BITS);
-    /* The bits remaining after N lower levels of page tables.  */
-    v_l1_bits = (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % V_L2_BITS;
-    if (v_l1_bits < V_L1_MIN_BITS) {
-        v_l1_bits += V_L2_BITS;
-    }
-
-    v_l1_size = 1 << v_l1_bits;
-    v_l1_shift = L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - v_l1_bits;
-    v_l2_levels = v_l1_shift / V_L2_BITS - 1;
-
-    assert(v_l1_bits <= V_L1_MAX_BITS);
-    assert(v_l1_shift % V_L2_BITS == 0);
-    assert(v_l2_levels >= 0);
-}
-
 /* Encode VAL as a signed leb128 sequence at P.
    Return P incremented past the encoded value.  */
 static uint8_t *encode_sleb128(uint8_t *p, target_long val)
@@ -404,72 +375,6 @@  void page_init(void)
 #endif
 }
 
-PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
-{
-    PageDesc *pd;
-    void **lp;
-    int i;
-
-    /* Level 1.  Always allocated.  */
-    lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));
-
-    /* Level 2..N-1.  */
-    for (i = v_l2_levels; i > 0; i--) {
-        void **p = qatomic_rcu_read(lp);
-
-        if (p == NULL) {
-            void *existing;
-
-            if (!alloc) {
-                return NULL;
-            }
-            p = g_new0(void *, V_L2_SIZE);
-            existing = qatomic_cmpxchg(lp, NULL, p);
-            if (unlikely(existing)) {
-                g_free(p);
-                p = existing;
-            }
-        }
-
-        lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
-    }
-
-    pd = qatomic_rcu_read(lp);
-    if (pd == NULL) {
-        void *existing;
-
-        if (!alloc) {
-            return NULL;
-        }
-        pd = g_new0(PageDesc, V_L2_SIZE);
-#ifndef CONFIG_USER_ONLY
-        {
-            int i;
-
-            for (i = 0; i < V_L2_SIZE; i++) {
-                qemu_spin_init(&pd[i].lock);
-            }
-        }
-#endif
-        existing = qatomic_cmpxchg(lp, NULL, pd);
-        if (unlikely(existing)) {
-#ifndef CONFIG_USER_ONLY
-            {
-                int i;
-
-                for (i = 0; i < V_L2_SIZE; i++) {
-                    qemu_spin_destroy(&pd[i].lock);
-                }
-            }
-#endif
-            g_free(pd);
-            pd = existing;
-        }
-    }
-
-    return pd + (index & (V_L2_SIZE - 1));
-}
-
 /* In user-mode page locks aren't used; mmap_lock is enough */
 #ifdef CONFIG_USER_ONLY
 struct page_collection *