
[v2,25/39] xen/riscv: introduce asm/p2m.h

Message ID a37e4b7115897a0265907c53667a0ede3763f0ce.1700761381.git.oleksii.kurochko@gmail.com (mailing list archive)
State Superseded
Series: Enable build of full Xen for RISC-V

Commit Message

Oleksii Kurochko Nov. 24, 2023, 10:30 a.m. UTC
Signed-off-by: Oleksii Kurochko <oleksii.kurochko@gmail.com>
---
Changes in V2:
 - Nothing changed. Only rebase.
---
 xen/arch/riscv/include/asm/p2m.h | 105 +++++++++++++++++++++++++++++++
 1 file changed, 105 insertions(+)
 create mode 100644 xen/arch/riscv/include/asm/p2m.h

Comments

Jan Beulich Dec. 14, 2023, 2:19 p.m. UTC | #1
On 24.11.2023 11:30, Oleksii Kurochko wrote:
> --- /dev/null
> +++ b/xen/arch/riscv/include/asm/p2m.h
> @@ -0,0 +1,105 @@
> +#ifndef __ASM_RISCV_P2M_H__
> +#define __ASM_RISCV_P2M_H__
> +
> +#include <asm/page-bits.h>
> +
> +#define paddr_bits PADDR_BITS
> +
> +/*
> + * List of possible types for each page in the p2m entry.
> + * The number of available bits per page in the pte for this purpose is 4.
> + * So it's possible to only have 16 types. If we run out of values in the
> + * future, it's possible to use higher values for pseudo-types and not store
> + * them in the p2m entry.
> + */
> +typedef enum {
> +    p2m_invalid = 0,    /* Nothing mapped here */
> +    p2m_ram_rw,         /* Normal read/write guest RAM */
> +    p2m_ram_ro,         /* Read-only; writes are silently dropped */
> +    p2m_mmio_direct_dev,/* Read/write mapping of genuine Device MMIO area */
> +    p2m_mmio_direct_nc, /* Read/write mapping of genuine MMIO area non-cacheable */
> +    p2m_mmio_direct_c,  /* Read/write mapping of genuine MMIO area cacheable */
> +    p2m_map_foreign_rw, /* Read/write RAM pages from foreign domain */
> +    p2m_map_foreign_ro, /* Read-only RAM pages from foreign domain */
> +    p2m_grant_map_rw,   /* Read/write grant mapping */
> +    p2m_grant_map_ro,   /* Read-only grant mapping */
> +    /* The types below are only used to decide the page attribute in the P2M */
> +    p2m_iommu_map_rw,   /* Read/write iommu mapping */
> +    p2m_iommu_map_ro,   /* Read-only iommu mapping */
> +    p2m_max_real_type,  /* Types after this won't be stored in the p2m */
> +} p2m_type_t;

As indicated before, I think you should have only those types here which
you're sure you'll need, or even just those which you really need right
away. I question in particular p2m_mmio_direct_*, which all look like
you simply took them from Arm, without regard as to applicability to
RISC-V. The fewer types you have here, the easier it is going to be to
tell what needs adding and what is already _properly_ supported.
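
For illustration, a minimal sketch of what such a trimmed-down enum could look like, keeping only the types the header itself relies on plus the invalid and terminator entries; the exact selection here is an assumption, not what the series settled on:

/*
 * Hypothetical reduced set for early RISC-V bring-up; further types can be
 * added once they are properly supported.
 */
typedef enum {
    p2m_invalid = 0,    /* Nothing mapped here */
    p2m_ram_rw,         /* Normal read/write guest RAM */
    p2m_ram_ro,         /* Read-only; writes are silently dropped */
    p2m_max_real_type,  /* Types after this won't be stored in the p2m */
} p2m_type_t;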

> +#include <xen/p2m-common.h>
> +
> +static inline int get_page_and_type(struct page_info *page,
> +                                    struct domain *domain,
> +                                    unsigned long type)
> +{
> +    BUG();
> +    return 1;
> +}

Imo despite the BUG() any such stub would better return failure, just
like ...

> +/* Look up a GFN and take a reference count on the backing page. */
> +typedef unsigned int p2m_query_t;
> +#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
> +#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */
> +
> +static inline struct page_info *get_page_from_gfn(
> +    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
> +{
> +    BUG();
> +    return NULL;
> +}

... you do here. May apply again further down.
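
For illustration, a sketch of the suggested shape for such a stub, assuming get_page_and_type() keeps the usual Xen boolean convention (non-zero on success), so failure is reported by returning 0:

static inline int get_page_and_type(struct page_info *page,
                                    struct domain *domain,
                                    unsigned long type)
{
    BUG();
    return 0; /* report failure rather than success, should BUG() ever return */
}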

> +static inline void memory_type_changed(struct domain *d)
> +{
> +    BUG();
> +}
> +
> +
> +static inline int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
> +                                                        unsigned int order)
> +{
> +    BUG();
> +    return 1;
> +}
> +
> +static inline int guest_physmap_add_entry(struct domain *d,
> +                            gfn_t gfn,
> +                            mfn_t mfn,
> +                            unsigned long page_order,
> +                            p2m_type_t t)
> +{
> +    BUG();
> +    return 1;
> +}
> +
> +/* Untyped version for RAM only, for compatibility */
> +static inline int __must_check
> +guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
> +                       unsigned int page_order)
> +{
> +    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
> +}
> +
> +static inline mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
> +{
> +    BUG();
> +    return _mfn(0);
> +}
> +
> +static inline bool arch_acquire_resource_check(struct domain *d)
> +{
> +    /*
> +     * The reference counting of foreign entries in set_foreign_p2m_entry()
> +     * is supported on RISCV.
> +     */
> +    return true;
> +}
> +
> +static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
> +{
> +    /* Not supported on RISCV. */
> +}
> +
> +#endif /* __ASM_RISCV_P2M_H__ */
> \ No newline at end of file

This wants taking care of.

Jan
Jan Beulich Dec. 14, 2023, 3:01 p.m. UTC | #2
On 24.11.2023 11:30, Oleksii Kurochko wrote:
> +static inline int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
> +                                                        unsigned int order)
> +{
> +    BUG();
> +    return 1;
> +}

This one I actually don't think needs to be a stub. It can return
-EOPNOTSUPP or -EINVAL right away, and then wouldn't need touching
again afaict.
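
A minimal sketch of that suggestion, picking -EOPNOTSUPP here (the choice between it and -EINVAL is left open above) and assuming <xen/errno.h> is in scope:

/* PoD is not implemented on RISC-V, so refuse the request outright. */
static inline int guest_physmap_mark_populate_on_demand(struct domain *d,
                                                        unsigned long gfn,
                                                        unsigned int order)
{
    return -EOPNOTSUPP;
}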

Jan
Oleksii Kurochko Dec. 18, 2023, 10:06 a.m. UTC | #3
On Thu, 2023-12-14 at 15:19 +0100, Jan Beulich wrote:
> On 24.11.2023 11:30, Oleksii Kurochko wrote:
> > --- /dev/null
> > +++ b/xen/arch/riscv/include/asm/p2m.h
> > @@ -0,0 +1,105 @@
> > +#ifndef __ASM_RISCV_P2M_H__
> > +#define __ASM_RISCV_P2M_H__
> > +
> > +#include <asm/page-bits.h>
> > +
> > +#define paddr_bits PADDR_BITS
> > +
> > +/*
> > + * List of possible types for each page in the p2m entry.
> > + * The number of available bits per page in the pte for this purpose is 4.
> > + * So it's possible to only have 16 types. If we run out of values in the
> > + * future, it's possible to use higher values for pseudo-types and not store
> > + * them in the p2m entry.
> > + */
> > +typedef enum {
> > +    p2m_invalid = 0,    /* Nothing mapped here */
> > +    p2m_ram_rw,         /* Normal read/write guest RAM */
> > +    p2m_ram_ro,         /* Read-only; writes are silently dropped */
> > +    p2m_mmio_direct_dev,/* Read/write mapping of genuine Device MMIO area */
> > +    p2m_mmio_direct_nc, /* Read/write mapping of genuine MMIO area non-cacheable */
> > +    p2m_mmio_direct_c,  /* Read/write mapping of genuine MMIO area cacheable */
> > +    p2m_map_foreign_rw, /* Read/write RAM pages from foreign domain */
> > +    p2m_map_foreign_ro, /* Read-only RAM pages from foreign domain */
> > +    p2m_grant_map_rw,   /* Read/write grant mapping */
> > +    p2m_grant_map_ro,   /* Read-only grant mapping */
> > +    /* The types below are only used to decide the page attribute in the P2M */
> > +    p2m_iommu_map_rw,   /* Read/write iommu mapping */
> > +    p2m_iommu_map_ro,   /* Read-only iommu mapping */
> > +    p2m_max_real_type,  /* Types after this won't be stored in the p2m */
> > +} p2m_type_t;
> 
> As indicated before, I think you should have only those types here which
> you're sure you'll need, or even just those which you really need right
> away. I question in particular p2m_mmio_direct_*, which all look like
> you simply took them from Arm, without regard as to applicability to
> RISC-V. The fewer types you have here, the easier it is going to be to
> tell what needs adding and what is already _properly_ supported.
Agreed. I'll keep only the types that are necessary for now.
I'll update that.

> 
> > +#include <xen/p2m-common.h>
> > +
> > +static inline int get_page_and_type(struct page_info *page,
> > +                                    struct domain *domain,
> > +                                    unsigned long type)
> > +{
> > +    BUG();
> > +    return 1;
> > +}
> 
> Imo despite the BUG() any such stub would better return failure, just
> like ...
Thanks. I'll update that.

> 
> > +/* Look up a GFN and take a reference count on the backing page. */
> > +typedef unsigned int p2m_query_t;
> > +#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
> > +#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */
> > +
> > +static inline struct page_info *get_page_from_gfn(
> > +    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
> > +{
> > +    BUG();
> > +    return NULL;
> > +}
> 
> ... you do here. May apply again further down.
> 
> > +static inline void memory_type_changed(struct domain *d)
> > +{
> > +    BUG();
> > +}
> > +
> > +
> > +static inline int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
> > +                                                        unsigned int order)
> > +{
> > +    BUG();
> > +    return 1;
> > +}
> > +
> > +static inline int guest_physmap_add_entry(struct domain *d,
> > +                            gfn_t gfn,
> > +                            mfn_t mfn,
> > +                            unsigned long page_order,
> > +                            p2m_type_t t)
> > +{
> > +    BUG();
> > +    return 1;
> > +}
> > +
> > +/* Untyped version for RAM only, for compatibility */
> > +static inline int __must_check
> > +guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
> > +                       unsigned int page_order)
> > +{
> > +    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
> > +}
> > +
> > +static inline mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
> > +{
> > +    BUG();
> > +    return _mfn(0);
> > +}
> > +
> > +static inline bool arch_acquire_resource_check(struct domain *d)
> > +{
> > +    /*
> > +     * The reference counting of foreign entries in set_foreign_p2m_entry()
> > +     * is supported on RISCV.
> > +     */
> > +    return true;
> > +}
> > +
> > +static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
> > +{
> > +    /* Not supported on RISCV. */
> > +}
> > +
> > +#endif /* __ASM_RISCV_P2M_H__ */
> > \ No newline at end of file
> 
> This wants taking care of.
Thanks. I'll update that.

~ Oleksii

Patch

diff --git a/xen/arch/riscv/include/asm/p2m.h b/xen/arch/riscv/include/asm/p2m.h
new file mode 100644
index 0000000000..993aec6d2c
--- /dev/null
+++ b/xen/arch/riscv/include/asm/p2m.h
@@ -0,0 +1,105 @@ 
+#ifndef __ASM_RISCV_P2M_H__
+#define __ASM_RISCV_P2M_H__
+
+#include <asm/page-bits.h>
+
+#define paddr_bits PADDR_BITS
+
+/*
+ * List of possible types for each page in the p2m entry.
+ * The number of available bits per page in the pte for this purpose is 4.
+ * So it's possible to only have 16 types. If we run out of values in the
+ * future, it's possible to use higher values for pseudo-types and not store
+ * them in the p2m entry.
+ */
+typedef enum {
+    p2m_invalid = 0,    /* Nothing mapped here */
+    p2m_ram_rw,         /* Normal read/write guest RAM */
+    p2m_ram_ro,         /* Read-only; writes are silently dropped */
+    p2m_mmio_direct_dev,/* Read/write mapping of genuine Device MMIO area */
+    p2m_mmio_direct_nc, /* Read/write mapping of genuine MMIO area non-cacheable */
+    p2m_mmio_direct_c,  /* Read/write mapping of genuine MMIO area cacheable */
+    p2m_map_foreign_rw, /* Read/write RAM pages from foreign domain */
+    p2m_map_foreign_ro, /* Read-only RAM pages from foreign domain */
+    p2m_grant_map_rw,   /* Read/write grant mapping */
+    p2m_grant_map_ro,   /* Read-only grant mapping */
+    /* The types below are only used to decide the page attribute in the P2M */
+    p2m_iommu_map_rw,   /* Read/write iommu mapping */
+    p2m_iommu_map_ro,   /* Read-only iommu mapping */
+    p2m_max_real_type,  /* Types after this won't be stored in the p2m */
+} p2m_type_t;
+
+#include <xen/p2m-common.h>
+
+static inline int get_page_and_type(struct page_info *page,
+                                    struct domain *domain,
+                                    unsigned long type)
+{
+    BUG();
+    return 1;
+}
+
+/* Look up a GFN and take a reference count on the backing page. */
+typedef unsigned int p2m_query_t;
+#define P2M_ALLOC    (1u<<0)   /* Populate PoD and paged-out entries */
+#define P2M_UNSHARE  (1u<<1)   /* Break CoW sharing */
+
+static inline struct page_info *get_page_from_gfn(
+    struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q)
+{
+    BUG();
+    return NULL;
+}
+
+static inline void memory_type_changed(struct domain *d)
+{
+    BUG();
+}
+
+
+static inline int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
+                                                        unsigned int order)
+{
+    BUG();
+    return 1;
+}
+
+static inline int guest_physmap_add_entry(struct domain *d,
+                            gfn_t gfn,
+                            mfn_t mfn,
+                            unsigned long page_order,
+                            p2m_type_t t)
+{
+    BUG();
+    return 1;
+}
+
+/* Untyped version for RAM only, for compatibility */
+static inline int __must_check
+guest_physmap_add_page(struct domain *d, gfn_t gfn, mfn_t mfn,
+                       unsigned int page_order)
+{
+    return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
+}
+
+static inline mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn)
+{
+    BUG();
+    return _mfn(0);
+}
+
+static inline bool arch_acquire_resource_check(struct domain *d)
+{
+    /*
+     * The reference counting of foreign entries in set_foreign_p2m_entry()
+     * is supported on RISCV.
+     */
+    return true;
+}
+
+static inline void p2m_altp2m_check(struct vcpu *v, uint16_t idx)
+{
+    /* Not supported on RISCV. */
+}
+
+#endif /* __ASM_RISCV_P2M_H__ */
\ No newline at end of file