diff mbox series

[v2,1/3] x86/mm: mod_l<N>_entry() have no need to use __copy_from_user()

Message ID 3bd38586-d76b-2ce5-a8bb-0777b30d5b61@suse.com (mailing list archive)
State New, archived
Headers show
Series x86/mm: (remaining) XSA-299 / 309 / 310 follow-up | expand

Commit Message

Jan Beulich Jan. 6, 2020, 3:34 p.m. UTC
mod_l1_entry()'s need to do so went away with commit 2d0557c5cb ("x86:
Fold page_info lock into type_info"), and the other three never had such
a need, at least going back as far as 3.2.0. Replace the uses by newly
introduced l<N>e_access_once().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Use ACCESS_ONCE() clones.

Comments

Andrew Cooper Jan. 6, 2020, 4:09 p.m. UTC | #1
On 06/01/2020 15:34, Jan Beulich wrote:
> --- a/xen/include/asm-x86/page.h
> +++ b/xen/include/asm-x86/page.h
> @@ -55,6 +55,16 @@
>  #define l4e_write(l4ep, l4e) \
>      pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))
>  
> +/* Type-correct ACCESS_ONCE() wrappers for PTE accesses. */
> +#define l1e_access_once(l1e) (*container_of(&ACCESS_ONCE(l1e_get_intpte(l1e)), \
> +                                            volatile l1_pgentry_t, l1))
> +#define l2e_access_once(l2e) (*container_of(&ACCESS_ONCE(l2e_get_intpte(l2e)), \
> +                                            volatile l2_pgentry_t, l2))
> +#define l3e_access_once(l3e) (*container_of(&ACCESS_ONCE(l3e_get_intpte(l3e)), \
> +                                            volatile l3_pgentry_t, l3))
> +#define l4e_access_once(l4e) (*container_of(&ACCESS_ONCE(l4e_get_intpte(l4e)), \
> +                                            volatile l4_pgentry_t, l4))

What's wrong with l?e_read_atomic() which already exist, and are already
used elsewhere?

If nothing, then Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
to save another round of posting.
Jan Beulich Jan. 6, 2020, 4:18 p.m. UTC | #2
On 06.01.2020 17:09, Andrew Cooper wrote:
> On 06/01/2020 15:34, Jan Beulich wrote:
>> --- a/xen/include/asm-x86/page.h
>> +++ b/xen/include/asm-x86/page.h
>> @@ -55,6 +55,16 @@
>>  #define l4e_write(l4ep, l4e) \
>>      pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))
>>  
>> +/* Type-correct ACCESS_ONCE() wrappers for PTE accesses. */
>> +#define l1e_access_once(l1e) (*container_of(&ACCESS_ONCE(l1e_get_intpte(l1e)), \
>> +                                            volatile l1_pgentry_t, l1))
>> +#define l2e_access_once(l2e) (*container_of(&ACCESS_ONCE(l2e_get_intpte(l2e)), \
>> +                                            volatile l2_pgentry_t, l2))
>> +#define l3e_access_once(l3e) (*container_of(&ACCESS_ONCE(l3e_get_intpte(l3e)), \
>> +                                            volatile l3_pgentry_t, l3))
>> +#define l4e_access_once(l4e) (*container_of(&ACCESS_ONCE(l4e_get_intpte(l4e)), \
>> +                                            volatile l4_pgentry_t, l4))
> 
> What's wrong with l?e_read_atomic() which already exist, and are already
> used elsewhere?

I did consider going that route, but predicted you would object to its
use here: IIRC you've previously voiced an opinion in this direction
(perhaps not on the page table accessors themselves but the underlying
{read,write}_u<N>_atomic()).

> If nothing, then Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
> to save another round of posting.

Let's get the above clarified - I'll be happy to switch to
l<N>e_read_atomic() if that's fine by you.

Jan
Andrew Cooper Jan. 6, 2020, 4:30 p.m. UTC | #3
On 06/01/2020 16:18, Jan Beulich wrote:
> On 06.01.2020 17:09, Andrew Cooper wrote:
>> On 06/01/2020 15:34, Jan Beulich wrote:
>>> --- a/xen/include/asm-x86/page.h
>>> +++ b/xen/include/asm-x86/page.h
>>> @@ -55,6 +55,16 @@
>>>  #define l4e_write(l4ep, l4e) \
>>>      pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))
>>>  
>>> +/* Type-correct ACCESS_ONCE() wrappers for PTE accesses. */
>>> +#define l1e_access_once(l1e) (*container_of(&ACCESS_ONCE(l1e_get_intpte(l1e)), \
>>> +                                            volatile l1_pgentry_t, l1))
>>> +#define l2e_access_once(l2e) (*container_of(&ACCESS_ONCE(l2e_get_intpte(l2e)), \
>>> +                                            volatile l2_pgentry_t, l2))
>>> +#define l3e_access_once(l3e) (*container_of(&ACCESS_ONCE(l3e_get_intpte(l3e)), \
>>> +                                            volatile l3_pgentry_t, l3))
>>> +#define l4e_access_once(l4e) (*container_of(&ACCESS_ONCE(l4e_get_intpte(l4e)), \
>>> +                                            volatile l4_pgentry_t, l4))
>> What's wrong with l?e_read_atomic() which already exist, and are already
>> used elsewhere?
> I did consider going that route, but predicted you would object to its
> use here: IIRC you've previously voiced an opinion in this direction
> (perhaps not on the page table accessors themselves but the underlying
> {read,write}_u<N>_atomic()).
>
>> If nothing, then Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
>> to save another round of posting.
> Let's get the above clarified - I'll be happy to switch to
> l<N>e_read_atomic() if that's fine by you.

I'd definitely prefer reusing l?e_read_atomic() than introducing a new
set of helpers.

I have two key issues with the general _atomic() infrastructure.

First, the term is overloaded for two very different things.  We use it
both for "don't subdivide this read/write" and "use a locked update",
where the latter is what is expected by the name "atomic".

Second, and specifically with {read,write}_u<N>_atomic(), it is their
construction using macros which makes them impossible to locate in the
source code.  This can trivially be fixed by not using macros.  (If it
were up to me, the use of ## would be disallowed in general, because it
does very little but obfuscate code.)

~Andrew
diff mbox series

Patch

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2124,13 +2124,10 @@  static int mod_l1_entry(l1_pgentry_t *pl
                         struct vcpu *pt_vcpu, struct domain *pg_dom)
 {
     bool preserve_ad = (cmd == MMU_PT_UPDATE_PRESERVE_AD);
-    l1_pgentry_t ol1e;
+    l1_pgentry_t ol1e = l1e_access_once(*pl1e);
     struct domain *pt_dom = pt_vcpu->domain;
     int rc = 0;
 
-    if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
-        return -EFAULT;
-
     ASSERT(!paging_mode_refcounts(pt_dom));
 
     if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
@@ -2248,8 +2245,7 @@  static int mod_l2_entry(l2_pgentry_t *pl
         return -EPERM;
     }
 
-    if ( unlikely(__copy_from_user(&ol2e, pl2e, sizeof(ol2e)) != 0) )
-        return -EFAULT;
+    ol2e = l2e_access_once(*pl2e);
 
     if ( l2e_get_flags(nl2e) & _PAGE_PRESENT )
     {
@@ -2311,8 +2307,7 @@  static int mod_l3_entry(l3_pgentry_t *pl
     if ( is_pv_32bit_domain(d) && (pgentry_ptr_to_slot(pl3e) >= 3) )
         return -EINVAL;
 
-    if ( unlikely(__copy_from_user(&ol3e, pl3e, sizeof(ol3e)) != 0) )
-        return -EFAULT;
+    ol3e = l3e_access_once(*pl3e);
 
     if ( l3e_get_flags(nl3e) & _PAGE_PRESENT )
     {
@@ -2378,8 +2373,7 @@  static int mod_l4_entry(l4_pgentry_t *pl
         return -EINVAL;
     }
 
-    if ( unlikely(__copy_from_user(&ol4e, pl4e, sizeof(ol4e)) != 0) )
-        return -EFAULT;
+    ol4e = l4e_access_once(*pl4e);
 
     if ( l4e_get_flags(nl4e) & _PAGE_PRESENT )
     {
--- a/xen/include/asm-x86/page.h
+++ b/xen/include/asm-x86/page.h
@@ -55,6 +55,16 @@ 
 #define l4e_write(l4ep, l4e) \
     pte_write(&l4e_get_intpte(*(l4ep)), l4e_get_intpte(l4e))
 
+/* Type-correct ACCESS_ONCE() wrappers for PTE accesses. */
+#define l1e_access_once(l1e) (*container_of(&ACCESS_ONCE(l1e_get_intpte(l1e)), \
+                                            volatile l1_pgentry_t, l1))
+#define l2e_access_once(l2e) (*container_of(&ACCESS_ONCE(l2e_get_intpte(l2e)), \
+                                            volatile l2_pgentry_t, l2))
+#define l3e_access_once(l3e) (*container_of(&ACCESS_ONCE(l3e_get_intpte(l3e)), \
+                                            volatile l3_pgentry_t, l3))
+#define l4e_access_once(l4e) (*container_of(&ACCESS_ONCE(l4e_get_intpte(l4e)), \
+                                            volatile l4_pgentry_t, l4))
+
 /* Get direct integer representation of a pte's contents (intpte_t). */
 #define l1e_get_intpte(x)          ((x).l1)
 #define l2e_get_intpte(x)          ((x).l2)