[v2,2/2] gnttab: restrict GNTTABOP_cache_flush to Arm

Message ID 0c2e7ded-45c2-485f-9184-c71a58f56109@suse.com (mailing list archive)
State New
Series gnttab: hypervisor side XSA-448 follow-up

Commit Message

Jan Beulich Feb. 26, 2024, 10:45 a.m. UTC
This special-purpose operation possibly shouldn't have been a grant-table
sub-op, but rather an arch-specific memory management one. No users are
known, or supposed, to exist outside of Arm. Along with adding suitable
#ifdef-s, move a helper function used solely here into the first of these
new #ifdef-s.

With the functionality restricted, questionable helper functions can
then also be purged from x86.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
---
v2: New.
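
For reference, the guest-visible effect of the restriction can be illustrated
with a minimal guest-side sketch (illustration only, not part of this patch).
The structure and flag names come from the public grant-table interface; the
Linux-style header paths, hypercall wrapper, and the helper name are
assumptions made purely for the example:

/*
 * Minimal guest-side sketch: issue GNTTABOP_cache_flush on a granted page.
 * gnttab_cache_flush / GNTTAB_CACHE_* are from the public grant-table
 * interface; the include paths and HYPERVISOR_grant_table_op() wrapper are
 * assumed Linux-guest conventions.
 */
#include <xen/interface/grant_table.h>
#include <asm/xen/hypercall.h>

static int flush_granted_page(grant_ref_t ref)
{
    struct gnttab_cache_flush cflush = {
        .a.ref  = ref,
        .offset = 0,
        .length = PAGE_SIZE,
        /* Flush by grant reference; clean and invalidate the data cache. */
        .op     = GNTTAB_CACHE_SOURCE_GREF |
                  GNTTAB_CACHE_CLEAN | GNTTAB_CACHE_INVALIDATE,
    };

    /*
     * On Arm this performs the requested cache maintenance; with this
     * patch applied, an x86 hypervisor returns -EOPNOTSUPP.
     */
    return HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
}

Previously an x86 hypervisor would have satisfied such a request via the
clean_and_invalidate_dcache_va_range() helper removed below, i.e. a
FLUSH_CACHE flush_area_local() call rather than targeted maintenance.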

Patch

--- a/xen/arch/x86/include/asm/flushtlb.h
+++ b/xen/arch/x86/include/asm/flushtlb.h
@@ -182,21 +182,6 @@  void flush_area_mask(const cpumask_t *ma
 }
 
 static inline void flush_page_to_ram(unsigned long mfn, bool sync_icache) {}
-static inline int invalidate_dcache_va_range(const void *p,
-                                             unsigned long size)
-{ return -EOPNOTSUPP; }
-static inline int clean_and_invalidate_dcache_va_range(const void *p,
-                                                       unsigned long size)
-{
-    unsigned int order = get_order_from_bytes(size);
-    /* sub-page granularity support needs to be added if necessary */
-    flush_area_local(p, FLUSH_CACHE|FLUSH_ORDER(order));
-    return 0;
-}
-static inline int clean_dcache_va_range(const void *p, unsigned long size)
-{
-    return clean_and_invalidate_dcache_va_range(p, size);
-}
 
 unsigned int guest_flush_tlb_flags(const struct domain *d);
 void guest_flush_tlb_mask(const struct domain *d, const cpumask_t *mask);
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -940,42 +940,6 @@  static void reduce_status_for_pin(struct
         gnttab_clear_flags(rd, clear_flags, status);
 }
 
-static struct active_grant_entry *grant_map_exists(const struct domain *ld,
-                                                   struct grant_table *rgt,
-                                                   mfn_t mfn,
-                                                   grant_ref_t *cur_ref)
-{
-    grant_ref_t ref, max_iter;
-
-    /*
-     * The remote grant table should be locked but the percpu rwlock
-     * cannot be checked for read lock without race conditions or high
-     * overhead so we cannot use an ASSERT
-     *
-     *   ASSERT(rw_is_locked(&rgt->lock));
-     */
-
-    max_iter = min(*cur_ref + (1 << GNTTABOP_CONTINUATION_ARG_SHIFT),
-                   nr_grant_entries(rgt));
-    for ( ref = *cur_ref; ref < max_iter; ref++ )
-    {
-        struct active_grant_entry *act = active_entry_acquire(rgt, ref);
-
-        if ( act->pin && act->domid == ld->domain_id &&
-             mfn_eq(act->mfn, mfn) )
-            return act;
-        active_entry_release(act);
-    }
-
-    if ( ref < nr_grant_entries(rgt) )
-    {
-        *cur_ref = ref;
-        return NULL;
-    }
-
-    return ERR_PTR(-EINVAL);
-}
-
 union maptrack_node {
     struct {
         /* Radix tree slot pointers use two of the bits. */
@@ -3519,6 +3483,44 @@  gnttab_swap_grant_ref(XEN_GUEST_HANDLE_P
     return 0;
 }
 
+#ifdef CONFIG_ARM
+
+static struct active_grant_entry *grant_map_exists(const struct domain *ld,
+                                                   struct grant_table *rgt,
+                                                   mfn_t mfn,
+                                                   grant_ref_t *cur_ref)
+{
+    grant_ref_t ref, max_iter;
+
+    /*
+     * The remote grant table should be locked but the percpu rwlock
+     * cannot be checked for read lock without race conditions or high
+     * overhead so we cannot use an ASSERT
+     *
+     *   ASSERT(rw_is_locked(&rgt->lock));
+     */
+
+    max_iter = min(*cur_ref + (1 << GNTTABOP_CONTINUATION_ARG_SHIFT),
+                   nr_grant_entries(rgt));
+    for ( ref = *cur_ref; ref < max_iter; ref++ )
+    {
+        struct active_grant_entry *act = active_entry_acquire(rgt, ref);
+
+        if ( act->pin && act->domid == ld->domain_id &&
+             mfn_eq(act->mfn, mfn) )
+            return act;
+        active_entry_release(act);
+    }
+
+    if ( ref < nr_grant_entries(rgt) )
+    {
+        *cur_ref = ref;
+        return NULL;
+    }
+
+    return ERR_PTR(-EINVAL);
+}
+
 static int _cache_flush(const gnttab_cache_flush_t *cflush, grant_ref_t *cur_ref)
 {
     struct domain *d, *owner;
@@ -3633,6 +3635,8 @@  gnttab_cache_flush(XEN_GUEST_HANDLE_PARA
     return 0;
 }
 
+#endif /* CONFIG_ARM */
+
 long do_grant_table_op(
     unsigned int cmd, XEN_GUEST_HANDLE_PARAM(void) uop, unsigned int count)
 {
@@ -3776,6 +3780,7 @@  long (do_grant_table_op)(
 
     case GNTTABOP_cache_flush:
     {
+#ifdef CONFIG_ARM
         XEN_GUEST_HANDLE_PARAM(gnttab_cache_flush_t) cflush =
             guest_handle_cast(uop, gnttab_cache_flush_t);
 
@@ -3788,6 +3793,9 @@  long (do_grant_table_op)(
             uop = guest_handle_cast(cflush, void);
             opaque_out = opaque_in;
         }
+#else
+        rc = -EOPNOTSUPP;
+#endif
         break;
     }