
[v4,18/31] x86/mm: move and rename map_ldt_shadow_page

Message ID: 20170817144456.18989-19-wei.liu2@citrix.com
State: New, archived

Commit Message

Wei Liu Aug. 17, 2017, 2:44 p.m. UTC
Take the chance to rename v to curr and d to currd in the code, and to
change the return type to bool. Fix up all the call sites.

Signed-off-by: Wei Liu <wei.liu2@citrix.com>
---
 xen/arch/x86/mm.c                   | 43 -------------------------------------
 xen/arch/x86/pv/descriptor-tables.c | 42 ++++++++++++++++++++++++++++++++++++
 xen/arch/x86/traps.c                |  5 +++--
 xen/include/asm-x86/mm.h            |  2 --
 xen/include/asm-x86/pv/processor.h  |  2 ++
 5 files changed, 47 insertions(+), 47 deletions(-)
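
As context for the interface change (not part of the patch), below is a minimal, self-contained sketch of how the renamed predicate is consumed by the #PF fixup path in traps.c. Only the pv_map_ldt_shadow_page() name, its bool return convention, and the offset >> PAGE_SHIFT call match the real code; handle_ldt_fault() and the body of the stub are simplified stand-ins for illustration.

```c
/*
 * Illustrative sketch only, not part of the patch.  Shows the new bool
 * return convention: "true" means the LDT fault was fixed up, "false"
 * means it is propagated to the guest (the old code used int 1/0).
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Simplified stand-in for the routine moved to pv/descriptor-tables.c. */
static bool pv_map_ldt_shadow_page(unsigned int off)
{
    /* Pretend only the first 16 LDT pages have valid guest mappings. */
    return off < 16;
}

/* Simplified stand-in for the LDT branch of handle_gdt_ldt_mapping_fault(). */
static bool handle_ldt_fault(unsigned long offset)
{
    /* LDT fault: copy a mapping from the guest's LDT, if it is valid. */
    if ( pv_map_ldt_shadow_page(offset >> PAGE_SHIFT) )
        return true;   /* fault fixed up, resume the guest */

    return false;      /* propagate #PF to the guest */
}

int main(void)
{
    printf("offset 0x3000 handled: %d\n", handle_ldt_fault(0x3000));
    printf("offset 0x20000 handled: %d\n", handle_ldt_fault(0x20000));
    return 0;
}
```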

Patch

diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 6cbcdabcd2..7f175bacc9 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -580,49 +580,6 @@  static int alloc_segdesc_page(struct page_info *page)
     return i == 512 ? 0 : -EINVAL;
 }
 
-
-/* Map shadow page at offset @off. */
-int map_ldt_shadow_page(unsigned int off)
-{
-    struct vcpu *v = current;
-    struct domain *d = v->domain;
-    unsigned long gmfn;
-    struct page_info *page;
-    l1_pgentry_t l1e, nl1e;
-    unsigned long gva = v->arch.pv_vcpu.ldt_base + (off << PAGE_SHIFT);
-    int okay;
-
-    BUG_ON(unlikely(in_irq()));
-
-    if ( is_pv_32bit_domain(d) )
-        gva = (u32)gva;
-    pv_get_guest_eff_kern_l1e(v, gva, &l1e);
-    if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) )
-        return 0;
-
-    gmfn = l1e_get_pfn(l1e);
-    page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
-    if ( unlikely(!page) )
-        return 0;
-
-    okay = get_page_type(page, PGT_seg_desc_page);
-    if ( unlikely(!okay) )
-    {
-        put_page(page);
-        return 0;
-    }
-
-    nl1e = l1e_from_pfn(page_to_mfn(page), l1e_get_flags(l1e) | _PAGE_RW);
-
-    spin_lock(&v->arch.pv_vcpu.shadow_ldt_lock);
-    l1e_write(&gdt_ldt_ptes(d, v)[off + 16], nl1e);
-    v->arch.pv_vcpu.shadow_ldt_mapcnt++;
-    spin_unlock(&v->arch.pv_vcpu.shadow_ldt_lock);
-
-    return 1;
-}
-
-
 bool get_page_from_mfn(mfn_t mfn, struct domain *d)
 {
     struct page_info *page = mfn_to_page(mfn_x(mfn));
diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c
index a302812774..6ac5c736cf 100644
--- a/xen/arch/x86/pv/descriptor-tables.c
+++ b/xen/arch/x86/pv/descriptor-tables.c
@@ -24,6 +24,7 @@ 
 #include <xen/hypercall.h>
 
 #include <asm/p2m.h>
+#include <asm/pv/mm.h>
 #include <asm/pv/processor.h>
 
 /*************************
@@ -217,6 +218,47 @@  int compat_update_descriptor(u32 pa_lo, u32 pa_hi, u32 desc_lo, u32 desc_hi)
                                 desc_lo | ((u64)desc_hi << 32));
 }
 
+/* Map shadow page at offset @off. */
+bool pv_map_ldt_shadow_page(unsigned int off)
+{
+    struct vcpu *curr = current;
+    struct domain *currd = curr->domain;
+    unsigned long gmfn;
+    struct page_info *page;
+    l1_pgentry_t l1e, nl1e;
+    unsigned long gva = curr->arch.pv_vcpu.ldt_base + (off << PAGE_SHIFT);
+    int okay;
+
+    BUG_ON(unlikely(in_irq()));
+
+    if ( is_pv_32bit_domain(currd) )
+        gva = (u32)gva;
+    pv_get_guest_eff_kern_l1e(curr, gva, &l1e);
+    if ( unlikely(!(l1e_get_flags(l1e) & _PAGE_PRESENT)) )
+        return false;
+
+    gmfn = l1e_get_pfn(l1e);
+    page = get_page_from_gfn(currd, gmfn, NULL, P2M_ALLOC);
+    if ( unlikely(!page) )
+        return false;
+
+    okay = get_page_type(page, PGT_seg_desc_page);
+    if ( unlikely(!okay) )
+    {
+        put_page(page);
+        return false;
+    }
+
+    nl1e = l1e_from_pfn(page_to_mfn(page), l1e_get_flags(l1e) | _PAGE_RW);
+
+    spin_lock(&curr->arch.pv_vcpu.shadow_ldt_lock);
+    l1e_write(&gdt_ldt_ptes(currd, curr)[off + 16], nl1e);
+    curr->arch.pv_vcpu.shadow_ldt_mapcnt++;
+    spin_unlock(&curr->arch.pv_vcpu.shadow_ldt_lock);
+
+    return true;
+}
+
 /*
  * Local variables:
  * mode: C
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index b93b3d1317..dbdcdf62a6 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -77,6 +77,7 @@ 
 #include <public/arch-x86/cpuid.h>
 #include <asm/cpuid.h>
 #include <xsm/xsm.h>
+#include <asm/pv/processor.h>
 #include <asm/pv/traps.h>
 
 /*
@@ -1100,7 +1101,7 @@  static int handle_gdt_ldt_mapping_fault(unsigned long offset,
     /*
      * If the fault is in another vcpu's area, it cannot be due to
      * a GDT/LDT descriptor load. Thus we can reasonably exit immediately, and
-     * indeed we have to since map_ldt_shadow_page() works correctly only on
+     * indeed we have to since pv_map_ldt_shadow_page() works correctly only on
      * accesses to a vcpu's own area.
      */
     if ( vcpu_area != curr->vcpu_id )
@@ -1112,7 +1113,7 @@  static int handle_gdt_ldt_mapping_fault(unsigned long offset,
     if ( likely(is_ldt_area) )
     {
         /* LDT fault: Copy a mapping from the guest's LDT, if it is valid. */
-        if ( likely(map_ldt_shadow_page(offset >> PAGE_SHIFT)) )
+        if ( likely(pv_map_ldt_shadow_page(offset >> PAGE_SHIFT)) )
         {
             if ( guest_mode(regs) )
                 trace_trap_two_addr(TRC_PV_GDT_LDT_MAPPING_FAULT,
diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
index c6e1d01c7d..c11fa680bd 100644
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -530,8 +530,6 @@  long subarch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg);
 int compat_arch_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void));
 int compat_subarch_memory_op(int op, XEN_GUEST_HANDLE_PARAM(void));
 
-int map_ldt_shadow_page(unsigned int);
-
 #define NIL(type) ((type *)-sizeof(type))
 #define IS_NIL(ptr) (!((uintptr_t)(ptr) + sizeof(*(ptr))))
 
diff --git a/xen/include/asm-x86/pv/processor.h b/xen/include/asm-x86/pv/processor.h
index 8ab5773871..6f9e1afe8a 100644
--- a/xen/include/asm-x86/pv/processor.h
+++ b/xen/include/asm-x86/pv/processor.h
@@ -25,6 +25,7 @@ 
 
 void pv_destroy_gdt(struct vcpu *d);
 long pv_set_gdt(struct vcpu *d, unsigned long *frames, unsigned int entries);
+bool pv_map_ldt_shadow_page(unsigned int);
 
 #else
 
@@ -34,6 +35,7 @@  static inline void pv_destroy_gdt(struct vcpu *d) {}
 static inline long pv_set_gdt(struct vcpu *d, unsigned long *frames,
                               unsigned int entries)
 { return -EINVAL; }
+static inline bool pv_map_ldt_shadow_page(unsigned int) { return false; }
 
 #endif