
[v2,02/12] x86/p2m: {,un}map_mmio_regions() are HVM-only

Message ID: 7f8ca70d-8bbe-bd5d-533a-c5ea81dc91a2@suse.com (mailing list archive)
State: New, archived
Series: x86/p2m: restrict more code to build just for HVM

Commit Message

Jan Beulich April 12, 2021, 2:06 p.m. UTC
Mirror the "translated" check the functions do to do_domctl(), allowing
the calls to be DCEd by the compiler. Add ASSERT_UNREACHABLE() to the
original checks.

Also arrange for {set,clear}_mmio_p2m_entry() and
{set,clear}_identity_p2m_entry() to respectively live next to each
other, such that clear_mmio_p2m_entry() can also be covered by the
#ifdef already covering set_mmio_p2m_entry().

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: Fix build.
---
Arguably the original checks, returning success, could also be dropped
at this point.
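
The DCE referred to above works because paging_mode_translate() collapses
to a compile-time constant in !CONFIG_HVM builds, making the guarded calls
provably dead, so no out-of-line definition is needed at link time. Below is
a minimal stand-alone sketch of the pattern (hypothetical, with simplified
types and names; not the actual Xen code). The !CONFIG_HVM variant needs -O1
or higher to link, mirroring how Xen relies on the optimizer here:

#include <stdbool.h>
#include <stdio.h>

struct domain { bool translated; };

#ifdef CONFIG_HVM
static inline bool paging_mode_translate(const struct domain *d)
{
    return d->translated;               /* real runtime check */
}

int map_mmio_regions(struct domain *d)  /* built only for HVM */
{
    (void)d;
    return 0;
}
#else
static inline bool paging_mode_translate(const struct domain *d)
{
    (void)d;
    return false;                       /* compile-time constant */
}

int map_mmio_regions(struct domain *d); /* declared, never defined */
#endif

int domctl_fragment(struct domain *d)
{
    if ( !paging_mode_translate(d) )
        return 0;                       /* mirrors the new do_domctl() check */
    return map_mmio_regions(d);         /* provably dead, hence DCEd, if !HVM */
}

int main(void)
{
    struct domain d = { .translated = false };

    printf("%d\n", domctl_fragment(&d));
    return 0;
}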

Comments

Roger Pau Monné April 29, 2021, 2:48 p.m. UTC | #1
On Mon, Apr 12, 2021 at 04:06:34PM +0200, Jan Beulich wrote:
> Mirror the "translated" check the functions do to do_domctl(), allowing
> the calls to be DCEd by the compiler. Add ASSERT_UNREACHABLE() to the
> original checks.
> 
> Also arrange for {set,clear}_mmio_p2m_entry() and
> {set,clear}_identity_p2m_entry() to respectively live next to each
> other, such that clear_mmio_p2m_entry() can also be covered by the
> #ifdef already covering set_mmio_p2m_entry().

Seeing the increase in HVM-specific regions, would it make sense to
consider splitting the HVM bits into p2m-hvm.c or some such?

> Signed-off-by: Jan Beulich <jbeulich@suse.com>
> ---
> v2: Fix build.
> ---
> Arguably the original checks, returning success, could also be dropped
> at this point.
> 
> --- a/xen/arch/x86/mm/p2m.c
> +++ b/xen/arch/x86/mm/p2m.c
> @@ -1352,52 +1352,6 @@ int set_mmio_p2m_entry(struct domain *d,
>                                 p2m_get_hostp2m(d)->default_access);
>  }
>  
> -#endif /* CONFIG_HVM */
> -
> -int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
> -                           p2m_access_t p2ma, unsigned int flag)
> -{
> -    p2m_type_t p2mt;
> -    p2m_access_t a;
> -    gfn_t gfn = _gfn(gfn_l);
> -    mfn_t mfn;
> -    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> -    int ret;
> -
> -    if ( !paging_mode_translate(p2m->domain) )
> -    {
> -        if ( !is_iommu_enabled(d) )
> -            return 0;
> -        return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l),
> -                                1ul << PAGE_ORDER_4K,
> -                                IOMMUF_readable | IOMMUF_writable);
> -    }
> -
> -    gfn_lock(p2m, gfn, 0);
> -
> -    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
> -
> -    if ( p2mt == p2m_invalid || p2mt == p2m_mmio_dm )
> -        ret = p2m_set_entry(p2m, gfn, _mfn(gfn_l), PAGE_ORDER_4K,
> -                            p2m_mmio_direct, p2ma);
> -    else if ( mfn_x(mfn) == gfn_l && p2mt == p2m_mmio_direct && a == p2ma )
> -        ret = 0;
> -    else
> -    {
> -        if ( flag & XEN_DOMCTL_DEV_RDM_RELAXED )
> -            ret = 0;
> -        else
> -            ret = -EBUSY;
> -        printk(XENLOG_G_WARNING
> -               "Cannot setup identity map d%d:%lx,"
> -               " gfn already mapped to %lx.\n",
> -               d->domain_id, gfn_l, mfn_x(mfn));
> -    }
> -
> -    gfn_unlock(p2m, gfn, 0);
> -    return ret;
> -}
> -
>  /*
>   * Returns:
>   *    0        for success
> @@ -1447,6 +1401,52 @@ int clear_mmio_p2m_entry(struct domain *
>      return rc;
>  }
>  
> +#endif /* CONFIG_HVM */
> +
> +int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
> +                           p2m_access_t p2ma, unsigned int flag)
> +{
> +    p2m_type_t p2mt;
> +    p2m_access_t a;
> +    gfn_t gfn = _gfn(gfn_l);
> +    mfn_t mfn;
> +    struct p2m_domain *p2m = p2m_get_hostp2m(d);
> +    int ret;
> +
> +    if ( !paging_mode_translate(p2m->domain) )
> +    {
> +        if ( !is_iommu_enabled(d) )
> +            return 0;
> +        return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l),
> +                                1ul << PAGE_ORDER_4K,
> +                                IOMMUF_readable | IOMMUF_writable);
> +    }
> +
> +    gfn_lock(p2m, gfn, 0);
> +
> +    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
> +
> +    if ( p2mt == p2m_invalid || p2mt == p2m_mmio_dm )
> +        ret = p2m_set_entry(p2m, gfn, _mfn(gfn_l), PAGE_ORDER_4K,
> +                            p2m_mmio_direct, p2ma);
> +    else if ( mfn_x(mfn) == gfn_l && p2mt == p2m_mmio_direct && a == p2ma )
> +        ret = 0;
> +    else
> +    {
> +        if ( flag & XEN_DOMCTL_DEV_RDM_RELAXED )
> +            ret = 0;
> +        else
> +            ret = -EBUSY;
> +        printk(XENLOG_G_WARNING
> +               "Cannot setup identity map d%d:%lx,"
> +               " gfn already mapped to %lx.\n",
> +               d->domain_id, gfn_l, mfn_x(mfn));
> +    }
> +
> +    gfn_unlock(p2m, gfn, 0);
> +    return ret;
> +}
> +
>  int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
>  {
>      p2m_type_t p2mt;
> @@ -1892,6 +1892,8 @@ void *map_domain_gfn(struct p2m_domain *
>      return map_domain_page(*mfn);
>  }
>  
> +#ifdef CONFIG_HVM
> +
>  static unsigned int mmio_order(const struct domain *d,
>                                 unsigned long start_fn, unsigned long nr)
>  {
> @@ -1932,7 +1934,10 @@ int map_mmio_regions(struct domain *d,
>      unsigned int iter, order;
>  
>      if ( !paging_mode_translate(d) )
> +    {
> +        ASSERT_UNREACHABLE();
>          return 0;
> +    }
>  
>      for ( iter = i = 0; i < nr && iter < MAP_MMIO_MAX_ITER;
>            i += 1UL << order, ++iter )
> @@ -1964,7 +1969,10 @@ int unmap_mmio_regions(struct domain *d,
>      unsigned int iter, order;
>  
>      if ( !paging_mode_translate(d) )
> +    {
> +        ASSERT_UNREACHABLE();
>          return 0;

Maybe consider returning an error here now instead of silently
failing? It's not supposed to be reached, so getting here likely means
something else has gone wrong and it's best to just report an error?

The rest LGTM:

Acked-by: Roger Pau Monné <roger.pau@citrix.com>

Thanks, Roger.

Jan Beulich April 29, 2021, 3:01 p.m. UTC | #2
On 29.04.2021 16:48, Roger Pau Monné wrote:
> On Mon, Apr 12, 2021 at 04:06:34PM +0200, Jan Beulich wrote:
>> Mirror the "translated" check the functions do to do_domctl(), allowing
>> the calls to be DCEd by the compiler. Add ASSERT_UNREACHABLE() to the
>> original checks.
>>
>> Also arrange for {set,clear}_mmio_p2m_entry() and
>> {set,clear}_identity_p2m_entry() to respectively live next to each
>> other, such that clear_mmio_p2m_entry() can also be covered by the
>> #ifdef already covering set_mmio_p2m_entry().
> 
> Seeing the increase in HVM-specific regions, would it make sense to
> consider splitting the HVM bits into p2m-hvm.c or some such?

As said on the 01/12 sub-thread, I see the goal as p2m.c as a whole
becoming HVM-specific.

>> @@ -1932,7 +1934,10 @@ int map_mmio_regions(struct domain *d,
>>      unsigned int iter, order;
>>  
>>      if ( !paging_mode_translate(d) )
>> +    {
>> +        ASSERT_UNREACHABLE();
>>          return 0;
>> +    }
>>  
>>      for ( iter = i = 0; i < nr && iter < MAP_MMIO_MAX_ITER;
>>            i += 1UL << order, ++iter )
>> @@ -1964,7 +1969,10 @@ int unmap_mmio_regions(struct domain *d,
>>      unsigned int iter, order;
>>  
>>      if ( !paging_mode_translate(d) )
>> +    {
>> +        ASSERT_UNREACHABLE();
>>          return 0;
> 
> Maybe consider returning an error here now instead of silently
> failing? It's not supposed to be reached, so getting here likely means
> something else has gone wrong and it's best to just report an error?

Can do, sure. Would be -EOPNOTSUPP.

> The rest LGTM:
> 
> Acked-by: Roger Pau Monné <roger.pau@citrix.com>

Thanks.

Jan
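
For reference, the error-return change agreed above would turn each of the
two guards into something like this (a sketch of a possible v3 hunk, not a
committed change):

     if ( !paging_mode_translate(d) )
     {
         ASSERT_UNREACHABLE();
-        return 0;
+        return -EOPNOTSUPP;
     }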

Patch

--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -1352,52 +1352,6 @@ int set_mmio_p2m_entry(struct domain *d,
                                p2m_get_hostp2m(d)->default_access);
 }
 
-#endif /* CONFIG_HVM */
-
-int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
-                           p2m_access_t p2ma, unsigned int flag)
-{
-    p2m_type_t p2mt;
-    p2m_access_t a;
-    gfn_t gfn = _gfn(gfn_l);
-    mfn_t mfn;
-    struct p2m_domain *p2m = p2m_get_hostp2m(d);
-    int ret;
-
-    if ( !paging_mode_translate(p2m->domain) )
-    {
-        if ( !is_iommu_enabled(d) )
-            return 0;
-        return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l),
-                                1ul << PAGE_ORDER_4K,
-                                IOMMUF_readable | IOMMUF_writable);
-    }
-
-    gfn_lock(p2m, gfn, 0);
-
-    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
-
-    if ( p2mt == p2m_invalid || p2mt == p2m_mmio_dm )
-        ret = p2m_set_entry(p2m, gfn, _mfn(gfn_l), PAGE_ORDER_4K,
-                            p2m_mmio_direct, p2ma);
-    else if ( mfn_x(mfn) == gfn_l && p2mt == p2m_mmio_direct && a == p2ma )
-        ret = 0;
-    else
-    {
-        if ( flag & XEN_DOMCTL_DEV_RDM_RELAXED )
-            ret = 0;
-        else
-            ret = -EBUSY;
-        printk(XENLOG_G_WARNING
-               "Cannot setup identity map d%d:%lx,"
-               " gfn already mapped to %lx.\n",
-               d->domain_id, gfn_l, mfn_x(mfn));
-    }
-
-    gfn_unlock(p2m, gfn, 0);
-    return ret;
-}
-
 /*
  * Returns:
  *    0        for success
@@ -1447,6 +1401,52 @@ int clear_mmio_p2m_entry(struct domain *
     return rc;
 }
 
+#endif /* CONFIG_HVM */
+
+int set_identity_p2m_entry(struct domain *d, unsigned long gfn_l,
+                           p2m_access_t p2ma, unsigned int flag)
+{
+    p2m_type_t p2mt;
+    p2m_access_t a;
+    gfn_t gfn = _gfn(gfn_l);
+    mfn_t mfn;
+    struct p2m_domain *p2m = p2m_get_hostp2m(d);
+    int ret;
+
+    if ( !paging_mode_translate(p2m->domain) )
+    {
+        if ( !is_iommu_enabled(d) )
+            return 0;
+        return iommu_legacy_map(d, _dfn(gfn_l), _mfn(gfn_l),
+                                1ul << PAGE_ORDER_4K,
+                                IOMMUF_readable | IOMMUF_writable);
+    }
+
+    gfn_lock(p2m, gfn, 0);
+
+    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL, NULL);
+
+    if ( p2mt == p2m_invalid || p2mt == p2m_mmio_dm )
+        ret = p2m_set_entry(p2m, gfn, _mfn(gfn_l), PAGE_ORDER_4K,
+                            p2m_mmio_direct, p2ma);
+    else if ( mfn_x(mfn) == gfn_l && p2mt == p2m_mmio_direct && a == p2ma )
+        ret = 0;
+    else
+    {
+        if ( flag & XEN_DOMCTL_DEV_RDM_RELAXED )
+            ret = 0;
+        else
+            ret = -EBUSY;
+        printk(XENLOG_G_WARNING
+               "Cannot setup identity map d%d:%lx,"
+               " gfn already mapped to %lx.\n",
+               d->domain_id, gfn_l, mfn_x(mfn));
+    }
+
+    gfn_unlock(p2m, gfn, 0);
+    return ret;
+}
+
 int clear_identity_p2m_entry(struct domain *d, unsigned long gfn_l)
 {
     p2m_type_t p2mt;
@@ -1892,6 +1892,8 @@ void *map_domain_gfn(struct p2m_domain *
     return map_domain_page(*mfn);
 }
 
+#ifdef CONFIG_HVM
+
 static unsigned int mmio_order(const struct domain *d,
                                unsigned long start_fn, unsigned long nr)
 {
@@ -1932,7 +1934,10 @@ int map_mmio_regions(struct domain *d,
     unsigned int iter, order;
 
     if ( !paging_mode_translate(d) )
+    {
+        ASSERT_UNREACHABLE();
         return 0;
+    }
 
     for ( iter = i = 0; i < nr && iter < MAP_MMIO_MAX_ITER;
           i += 1UL << order, ++iter )
@@ -1964,7 +1969,10 @@ int unmap_mmio_regions(struct domain *d,
     unsigned int iter, order;
 
     if ( !paging_mode_translate(d) )
+    {
+        ASSERT_UNREACHABLE();
         return 0;
+    }
 
     for ( iter = i = 0; i < nr && iter < MAP_MMIO_MAX_ITER;
           i += 1UL << order, ++iter )
@@ -1986,8 +1994,6 @@ int unmap_mmio_regions(struct domain *d,
     return i == nr ? 0 : i ?: ret;
 }
 
-#ifdef CONFIG_HVM
-
 int altp2m_get_effective_entry(struct p2m_domain *ap2m, gfn_t gfn, mfn_t *mfn,
                                p2m_type_t *t, p2m_access_t *a,
                                bool prepopulate)
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -750,6 +750,9 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
         if ( ret )
             break;
 
+        if ( !paging_mode_translate(d) )
+            break;
+
         if ( add )
         {
             printk(XENLOG_G_DEBUG
--- a/xen/include/asm-x86/p2m.h
+++ b/xen/include/asm-x86/p2m.h
@@ -678,11 +678,19 @@ int p2m_finish_type_change(struct domain
 int p2m_is_logdirty_range(struct p2m_domain *, unsigned long start,
                           unsigned long end);
 
+#ifdef CONFIG_HVM
 /* Set mmio addresses in the p2m table (for pass-through) */
 int set_mmio_p2m_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
                        unsigned int order);
 int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn,
                          unsigned int order);
+#else
+static inline int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn,
+                                       mfn_t mfn, unsigned int order)
+{
+    return -EIO;
+}
+#endif
 
 /* Set identity addresses in the p2m table (for pass-through) */
 int set_identity_p2m_entry(struct domain *d, unsigned long gfn,