@@ -155,6 +155,72 @@ int altp2m_set_mem_access(struct domain *d,
return rc;
}
+/*
+ * The function altp2m_lazy_copy returns "false" on error. The return value
+ * "true" signals that either the mapping has been successfully lazy-copied
+ * from the hostp2m to the currently active altp2m view or that the altp2m
+ * view already holds a valid mapping. The latter happens if multiple vcpus
+ * sharing the same altp2m view fault on translations that resolve to the
+ * same mapping and the first fault has already been handled.
+ */
+bool altp2m_lazy_copy(struct vcpu *v, gfn_t gfn)
+{
+ struct domain *d = v->domain;
+ struct p2m_domain *hp2m = p2m_get_hostp2m(d), *ap2m = NULL;
+ p2m_type_t p2mt;
+ p2m_access_t p2ma;
+ mfn_t mfn;
+ unsigned int page_order;
+ int rc;
+
+ ap2m = altp2m_get_altp2m(v);
+ if ( unlikely(!ap2m) )
+ return false;
+
+    /*
+     * Lock hp2m to prevent the hostp2m from changing a mapping before it is
+     * added to the altp2m view.
+     */
+ p2m_read_lock(hp2m);
+ p2m_write_lock(ap2m);
+
+ /* Check if entry is part of the altp2m view. */
+ mfn = p2m_get_entry(ap2m, gfn, NULL, NULL, NULL);
+
+    /*
+     * If multiple vcpus are using the same altp2m, it is likely that more
+     * than one of them generates a translation fault. The first fault is
+     * handled normally, while the later ones encounter a valid mapping that
+     * was already added as a result of the first fault. In that case, the
+     * later vcpus simply need to retry accessing the faulting address.
+     */
+ if ( !mfn_eq(mfn, INVALID_MFN) )
+ goto out;
+
+ /* Check if entry is part of the host p2m view. */
+ mfn = p2m_get_entry(hp2m, gfn, &p2mt, &p2ma, &page_order);
+ if ( mfn_eq(mfn, INVALID_MFN) )
+ goto out;
+
+    /* Align the gfn and mfn to the given page order. */
+ gfn = _gfn(gfn_x(gfn) & ~((1UL << page_order) - 1));
+ mfn = _mfn(mfn_x(mfn) & ~((1UL << page_order) - 1));
+
+ rc = p2m_set_entry(ap2m, gfn, (1UL << page_order), mfn, p2mt, p2ma);
+ if ( rc )
+ {
+ gdprintk(XENLOG_ERR, "altp2m[%u] failed to set entry for %#"PRI_gfn" -> %#"PRI_mfn"\n",
+ v->arch.ap2m_idx, gfn_x(gfn), mfn_x(mfn));
+ domain_crash(d);
+ }
+
+out:
+ p2m_write_unlock(ap2m);
+ p2m_read_unlock(hp2m);
+
+ return true;
+}
+
static inline void altp2m_reset(struct p2m_domain *p2m)
{
p2m_write_lock(p2m);
@@ -52,6 +52,8 @@
#include <asm/cpuerrata.h>
#include <asm/acpi.h>
+#include <asm/altp2m.h>
+
/* The base of the stack must always be double-word aligned, which means
* that both the kernel half of struct cpu_user_regs (which is pushed in
* entry.S) and struct cpu_info (which lives at the bottom of a Xen
@@ -2634,6 +2636,14 @@ static void do_trap_instr_abort_guest(struct cpu_user_regs *regs,
}
case FSC_FLT_TRANS:
/*
+ * The guest shall retry accessing the page if the altp2m handler
+ * succeeds. Otherwise, we continue injecting an instruction abort
+ * exception.
+ */
+ if ( altp2m_lazy_copy(current, _gfn(paddr_to_pfn(gpa))) )
+ return;
+
+ /*
* The PT walk may have failed because someone was playing
* with the Stage-2 page table. Walk the Stage-2 PT to check
* if the entry exists. If it's the case, return to the guest
@@ -2774,6 +2784,13 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
}
/*
+ * The guest shall retry accessing the page if the altp2m handler
+ * succeeds. Otherwise, we continue injecting a data abort exception.
+ */
+ if ( altp2m_lazy_copy(current, _gfn(paddr_to_pfn(info.gpa))) )
+ return;
+
+ /*
* The PT walk may have failed because someone was playing
* with the Stage-2 page table. Walk the Stage-2 PT to check
* if the entry exists. If it's the case, return to the guest
@@ -81,6 +81,10 @@ int altp2m_set_mem_access(struct domain *d,
p2m_access_t a,
gfn_t gfn);
+/* Alternate p2m paging mechanism. */
+bool altp2m_lazy_copy(struct vcpu *v,
+ gfn_t gfn);
+
/* Propagates changes made to hostp2m to affected altp2m views. */
int altp2m_propagate_change(struct domain *d,
gfn_t sgfn,
This commit adds the function "altp2m_lazy_copy" implementing the altp2m paging mechanism. The function "altp2m_lazy_copy" lazily copies the hostp2m's mapping into the currently active altp2m view on 2nd stage translation faults on instruction or data access. Signed-off-by: Sergej Proskurin <proskurin@sec.in.tum.de> --- Cc: Stefano Stabellini <sstabellini@kernel.org> Cc: Julien Grall <julien.grall@arm.com> --- v3: Cosmetic fixes. Locked hostp2m in the function "altp2m_lazy_copy" to avoid a mapping being changed in hostp2m before it has been inserted into the altp2m view. Removed unnecessary calls to "p2m_mem_access_check" in the functions "do_trap_instr_abort_guest" and "do_trap_data_abort_guest" after a translation fault has been handled by the function "altp2m_lazy_copy". Adapted "altp2m_lazy_copy" to return the value "true" if the translation fault encounters a valid entry inside of the currently active altp2m view. If multiple vcpus are using the same altp2m, it is likely that both generate a translation fault, whereas the first one will be already handled by "altp2m_lazy_copy". With this change the 2nd vcpu will retry accessing the faulting address. Changed order of altp2m checking and MMIO emulation within the function "do_trap_data_abort_guest". Now, altp2m is checked and handled only if the MMIO does not have to be emulated. Changed the function prototype of "altp2m_lazy_copy". This commit removes the unnecessary struct p2m_domain* from the previous function prototype. Also, this commit removes the unnecessary argument gva. Finally, this commit changes the type of the function parameter gpa from paddr_t to gfn_t and renames it to gfn. Moved the altp2m handling mechanism into a separate function "try_handle_altp2m". Moved the functions "p2m_altp2m_check" and "altp2m_switch_vcpu_altp2m_by_id" out of this patch. Moved applied code movement into a separate patch. v4: Cosmetic fixes. 
Changed the function prototype of "altp2m_lazy_copy" and "try_handle_altp2m" by removing the unused function parameter of type "struct npfec". Removed the function "try_handle_altp2m". Please note that we cannot reorder the calls to "altp2m_lazy_copy" and "gfn_to_mfn" so as to deprioritize altp2m. If the call to "gfn_to_mfn" were performed before "altp2m_lazy_copy", the system would likely stall if altp2m was active. This is because the "p2m_lookup" routine in "gfn_to_mfn" considers only the host's p2m, which will most likely return an mfn != INVALID_MFN and thus entirely skip the call to "altp2m_lazy_copy". Use the functions "p2m_(set|get)_entry" instead of the helpers "p2m_lookup_attr" and "modify_altp2m_entry" in the function "altp2m_lazy_copy". Therefore, we write-lock the altp2m view throughout the entire function. Moved read-locking of hp2m to the beginning of the function "altp2m_lazy_copy". --- xen/arch/arm/altp2m.c | 66 ++++++++++++++++++++++++++++++++++++++++++++ xen/arch/arm/traps.c | 17 ++++++++++++ xen/include/asm-arm/altp2m.h | 4 +++ 3 files changed, 87 insertions(+)