
[for-4.8,v2,12/23] xen/arm: p2m: Introduce p2m_get_entry and use it to implement __p2m_lookup

Message ID 1473938919-31976-13-git-send-email-julien.grall@arm.com (mailing list archive)
State New, archived

Commit Message

Julien Grall Sept. 15, 2016, 11:28 a.m. UTC
Currently, for a given GFN, the function __p2m_lookup will only return
the associated MFN and the p2m type of the mapping.

In some cases we need the order of the mapping and the memaccess
permission. Rather than providing a separate function for this purpose,
it is better to implement a generic function that returns all the
information.

To avoid passing dummy parameters, a caller that does not need a
specific piece of information can pass NULL instead.

The list of information retrieved is based on the x86 version. All of
it will be used in follow-up patches.

It might have been possible to extend __p2m_lookup; however, I chose to
reimplement it from scratch to allow sharing some helpers with the
function that will update the P2M (to be added in a follow-up patch).

Signed-off-by: Julien Grall <julien.grall@arm.com>

---
    Changes in v2:
        - Export p2m_get_entry
        - Fix the computation of the order when there is no mapping
        - Use level_orders rather than level_shifts - PAGE_SHIFT
        - Update documentation
        - Fix typos
        - The definition of level_orders has been moved to an earlier
        patch
---
 xen/arch/arm/p2m.c        | 188 +++++++++++++++++++++++++++++++++++-----------
 xen/include/asm-arm/p2m.h |   8 ++
 2 files changed, 154 insertions(+), 42 deletions(-)
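
For illustration, a caller that only needs the type and the order of a
mapping could use the new function as in the sketch below. This is not
part of the patch: example_lookup() and do_something() are hypothetical,
and the p2m_read_lock()/p2m_read_unlock() helpers are assumed from the
rwlock rework earlier in this series (they live in p2m.c).

    /* Illustrative sketch only -- not part of the patch. */
    static void example_lookup(struct domain *d, gfn_t gfn)
    {
        struct p2m_domain *p2m = &d->arch.p2m;
        p2m_type_t t;
        unsigned int page_order;
        mfn_t mfn;

        p2m_read_lock(p2m);           /* p2m_get_entry expects the lock held */
        mfn = p2m_get_entry(p2m, gfn, &t,
                            NULL,     /* memaccess not needed -> pass NULL */
                            &page_order);
        p2m_read_unlock(p2m);

        if ( !mfn_eq(mfn, INVALID_MFN) )
            /*
             * The mapping covers (1UL << page_order) contiguous pages
             * starting at the order-aligned GFN; do_something() is a
             * hypothetical consumer.
             */
            do_something(mfn, t, page_order);
    }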

Comments

Stefano Stabellini Sept. 17, 2016, 1:36 a.m. UTC | #1
On Thu, 15 Sep 2016, Julien Grall wrote:
> Currently, for a given GFN, the function __p2m_lookup will only return
> the associated MFN and the p2m type of the mapping.
> 
> In some cases we need the order of the mapping and the memaccess
> permission. Rather than providing a separate function for this purpose,
> it is better to implement a generic function that returns all the
> information.
> 
> To avoid passing dummy parameters, a caller that does not need a
> specific piece of information can pass NULL instead.
> 
> The list of information retrieved is based on the x86 version. All of
> it will be used in follow-up patches.
> 
> It might have been possible to extend __p2m_lookup; however, I chose to
> reimplement it from scratch to allow sharing some helpers with the
> function that will update the P2M (to be added in a follow-up patch).
> 
> Signed-off-by: Julien Grall <julien.grall@arm.com>

Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>



Patch

diff --git a/xen/arch/arm/p2m.c b/xen/arch/arm/p2m.c
index b2a29ad..6e56b97 100644
--- a/xen/arch/arm/p2m.c
+++ b/xen/arch/arm/p2m.c
@@ -238,28 +238,104 @@ static lpae_t *p2m_get_root_pointer(struct p2m_domain *p2m,
 
 /*
  * Lookup the MFN corresponding to a domain's GFN.
+ * Lookup mem access in the radix tree.
+ * The entry associated with the GFN is considered valid.
+ */
+static p2m_access_t p2m_mem_access_radix_get(struct p2m_domain *p2m, gfn_t gfn)
+{
+    void *ptr;
+
+    if ( !p2m->mem_access_enabled )
+        return p2m->default_access;
+
+    ptr = radix_tree_lookup(&p2m->mem_access_settings, gfn_x(gfn));
+    if ( !ptr )
+        return p2m_access_rwx;
+    else
+        return radix_tree_ptr_to_int(ptr);
+}
+
+#define GUEST_TABLE_MAP_FAILED 0
+#define GUEST_TABLE_SUPER_PAGE 1
+#define GUEST_TABLE_NORMAL_PAGE 2
+
+static int p2m_create_table(struct p2m_domain *p2m, lpae_t *entry,
+                            int level_shift);
+
+/*
+ * Take the currently mapped table, find the corresponding GFN entry,
+ * and map the next table, if available. The previous table will be
+ * unmapped if the next level was mapped (e.g. GUEST_TABLE_NORMAL_PAGE
+ * returned).
  *
- * There are no processor functions to do a stage 2 only lookup therefore we
- * do a a software walk.
+ * The read_only parameter indicates whether intermediate tables should
+ * be allocated when not present.
+ *
+ * Return values:
+ *  GUEST_TABLE_MAP_FAILED: Either read_only was set and the entry
+ *  was empty, or allocating a new page failed.
+ *  GUEST_TABLE_NORMAL_PAGE: The next level was mapped normally.
+ *  GUEST_TABLE_SUPER_PAGE: The next entry points to a superpage.
  */
-static mfn_t __p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t)
+static int p2m_next_level(struct p2m_domain *p2m, bool read_only,
+                          lpae_t **table, unsigned int offset)
 {
-    struct p2m_domain *p2m = &d->arch.p2m;
-    const paddr_t paddr = pfn_to_paddr(gfn_x(gfn));
-    const unsigned int offsets[4] = {
-        zeroeth_table_offset(paddr),
-        first_table_offset(paddr),
-        second_table_offset(paddr),
-        third_table_offset(paddr)
-    };
-    const paddr_t masks[4] = {
-        ZEROETH_MASK, FIRST_MASK, SECOND_MASK, THIRD_MASK
-    };
-    lpae_t pte, *map;
+    lpae_t *entry;
+    int ret;
+    mfn_t mfn;
+
+    entry = *table + offset;
+
+    if ( !p2m_valid(*entry) )
+    {
+        if ( read_only )
+            return GUEST_TABLE_MAP_FAILED;
+
+        ret = p2m_create_table(p2m, entry, /* not used */ ~0);
+        if ( ret )
+            return GUEST_TABLE_MAP_FAILED;
+    }
+
+    /* The function p2m_next_level is never called at the 3rd level */
+    if ( p2m_mapping(*entry) )
+        return GUEST_TABLE_SUPER_PAGE;
+
+    mfn = _mfn(entry->p2m.base);
+
+    unmap_domain_page(*table);
+    *table = map_domain_page(mfn);
+
+    return GUEST_TABLE_NORMAL_PAGE;
+}
+
+/*
+ * Get the details of a given gfn.
+ *
+ * If the entry is present, the associated MFN will be returned and the
+ * access and type filled up. The page_order will correspond to the
+ * order of the mapping in the page table (i.e it could be a superpage).
+ *
+ * If the entry is not present, INVALID_MFN will be returned and the
+ * page_order will be set according to the order of the invalid range.
+ */
+mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
+                    p2m_type_t *t, p2m_access_t *a,
+                    unsigned int *page_order)
+{
+    paddr_t addr = pfn_to_paddr(gfn_x(gfn));
+    unsigned int level = 0;
+    lpae_t entry, *table;
+    int rc;
     mfn_t mfn = INVALID_MFN;
-    paddr_t mask = 0;
     p2m_type_t _t;
-    unsigned int level;
+
+    /* Convenience aliases */
+    const unsigned int offsets[4] = {
+        zeroeth_table_offset(addr),
+        first_table_offset(addr),
+        second_table_offset(addr),
+        third_table_offset(addr)
+    };
 
     ASSERT(p2m_is_locked(p2m));
     BUILD_BUG_ON(THIRD_MASK != PAGE_MASK);
@@ -269,46 +345,74 @@ static mfn_t __p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t)
 
     *t = p2m_invalid;
 
-    map = p2m_get_root_pointer(p2m, gfn);
-    if ( !map )
-        return INVALID_MFN;
-
-    ASSERT(P2M_ROOT_LEVEL < 4);
+    /* XXX: Check if the mapping is lower than the mapped gfn */
 
-    for ( level = P2M_ROOT_LEVEL ; level < 4 ; level++ )
+    /* This gfn is higher than the highest the p2m map currently holds */
+    if ( gfn_x(gfn) > gfn_x(p2m->max_mapped_gfn) )
     {
-        mask = masks[level];
+        for ( level = P2M_ROOT_LEVEL; level < 3; level++ )
+            if ( (gfn_x(gfn) & (level_masks[level] >> PAGE_SHIFT)) >
+                 gfn_x(p2m->max_mapped_gfn) )
+                break;
 
-        pte = map[offsets[level]];
+        goto out;
+    }
 
-        if ( level == 3 && !p2m_table(pte) )
-            /* Invalid, clobber the pte */
-            pte.bits = 0;
-        if ( level == 3 || !p2m_table(pte) )
-            /* Done */
-            break;
+    table = p2m_get_root_pointer(p2m, gfn);
 
-        ASSERT(level < 3);
+    /*
+     * The table should always be non-NULL because the gfn is below
+     * p2m->max_mapped_gfn and the root table pages are always present.
+     */
+    BUG_ON(table == NULL);
 
-        /* Map for next level */
-        unmap_domain_page(map);
-        map = map_domain_page(_mfn(pte.p2m.base));
+    for ( level = P2M_ROOT_LEVEL; level < 3; level++ )
+    {
+        rc = p2m_next_level(p2m, true, &table, offsets[level]);
+        if ( rc == GUEST_TABLE_MAP_FAILED )
+            goto out_unmap;
+        else if ( rc != GUEST_TABLE_NORMAL_PAGE )
+            break;
     }
 
-    unmap_domain_page(map);
+    entry = table[offsets[level]];
 
-    if ( p2m_valid(pte) )
+    if ( p2m_valid(entry) )
     {
-        ASSERT(mask);
-        ASSERT(pte.p2m.type != p2m_invalid);
-        mfn = _mfn(paddr_to_pfn((pte.bits & PADDR_MASK & mask) |
-                                (paddr & ~mask)));
-        *t = pte.p2m.type;
+        *t = entry.p2m.type;
+
+        if ( a )
+            *a = p2m_mem_access_radix_get(p2m, gfn);
+
+        mfn = _mfn(entry.p2m.base);
+        /*
+         * The entry may point to a superpage. Find the MFN associated
+         * to the GFN.
+         */
+        mfn = mfn_add(mfn, gfn_x(gfn) & ((1UL << level_orders[level]) - 1));
     }
 
+out_unmap:
+    unmap_domain_page(table);
+
+out:
+    if ( page_order )
+        *page_order = level_orders[level];
+
     return mfn;
 }
 
+/*
+ * Lookup the MFN corresponding to a domain's GFN.
+ *
+ * There are no processor functions to do a stage 2 only lookup, therefore
+ * we do a software walk.
+ */
+static mfn_t __p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t)
+{
+    return p2m_get_entry(&d->arch.p2m, gfn, t, NULL, NULL);
+}
+
 mfn_t p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t)
 {
     mfn_t ret;
diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h
index 156df5e..6fe6a37 100644
--- a/xen/include/asm-arm/p2m.h
+++ b/xen/include/asm-arm/p2m.h
@@ -179,6 +179,14 @@ void p2m_dump_info(struct domain *d);
 /* Look up the MFN corresponding to a domain's GFN. */
 mfn_t p2m_lookup(struct domain *d, gfn_t gfn, p2m_type_t *t);
 
+/*
+ * Get details of a given gfn.
+ * The P2M lock should be taken by the caller.
+ */
+mfn_t p2m_get_entry(struct p2m_domain *p2m, gfn_t gfn,
+                    p2m_type_t *t, p2m_access_t *a,
+                    unsigned int *page_order);
+
 /* Clean & invalidate caches corresponding to a region of guest address space */
 int p2m_cache_flush(struct domain *d, gfn_t start, unsigned long nr);
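
A worked example of the order handling above, assuming a 4KB granule
where level_orders is { 27, 18, 9, 0 } (level shift minus PAGE_SHIFT):
a walk that stops on a level-2 block has hit a 2MB superpage, so
page_order is 9 and the MFN is offset within the block. The GFN/MFN
values below are made up for illustration.

    /* Sketch with illustrative values, assuming a 4KB granule. */
    unsigned int order = 9;            /* level_orders[2] = 21 - PAGE_SHIFT */
    unsigned long gfn = 0x80123;       /* example GFN inside a 2MB block */
    unsigned long base = 0x40000;      /* entry.p2m.base, order-aligned */

    /* The low 'order' bits of the GFN are the offset within the block. */
    unsigned long mfn = base + (gfn & ((1UL << order) - 1));   /* 0x40123 */

    /*
     * The block maps GFN 0x80000-0x801ff onto MFN 0x40000-0x401ff. When
     * the entry is absent, p2m_get_entry returns INVALID_MFN but still
     * sets page_order from the level where the walk stopped, so callers
     * can skip the whole invalid range in one go.
     */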