diff --git a/arch/arm64/kvm/hyp/include/nvhe/iommu.h b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
--- a/arch/arm64/kvm/hyp/include/nvhe/iommu.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/iommu.h
@@ -21,6 +21,7 @@ size_t kvm_iommu_map_pages(pkvm_handle_t domain_id,
size_t kvm_iommu_unmap_pages(pkvm_handle_t domain_id, unsigned long iova,
size_t pgsize, size_t pgcount);
phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t domain_id, unsigned long iova);
+bool kvm_iommu_host_dabt_handler(struct kvm_cpu_context *host_ctxt, u64 esr, u64 addr);
/* Flags for memory allocation for IOMMU drivers */
#define IOMMU_PAGE_NOCACHE BIT(0)
@@ -49,6 +50,7 @@ struct kvm_iommu_ops {
phys_addr_t (*iova_to_phys)(struct kvm_hyp_iommu_domain *domain, unsigned long iova);
void (*iotlb_sync)(struct kvm_hyp_iommu_domain *domain,
struct iommu_iotlb_gather *gather);
+ bool (*dabt_handler)(struct kvm_cpu_context *host_ctxt, u64 esr, u64 addr);
};
int kvm_iommu_init(void);
diff --git a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
--- a/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
+++ b/arch/arm64/kvm/hyp/nvhe/iommu/iommu.c
@@ -4,6 +4,10 @@
*
* Copyright (C) 2022 Linaro Ltd.
*/
+#include <asm/kvm_hyp.h>
+
+#include <hyp/adjust_pc.h>
+
#include <kvm/iommu.h>
#include <nvhe/iommu.h>
@@ -375,6 +379,19 @@ phys_addr_t kvm_iommu_iova_to_phys(pkvm_handle_t domain_id, unsigned long iova)
return phys;
}
+bool kvm_iommu_host_dabt_handler(struct kvm_cpu_context *host_ctxt, u64 esr, u64 addr)
+{
+ bool ret = false;
+
+ if (kvm_iommu_ops && kvm_iommu_ops->dabt_handler)
+ ret = kvm_iommu_ops->dabt_handler(host_ctxt, esr, addr);
+
+ if (ret)
+ kvm_skip_host_instr();
+
+ return ret;
+}
+
static int iommu_power_on(struct kvm_power_domain *pd)
{
struct kvm_hyp_iommu *iommu = container_of(pd, struct kvm_hyp_iommu,
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -16,6 +16,7 @@
#include <hyp/fault.h>
#include <nvhe/gfp.h>
+#include <nvhe/iommu.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
@@ -799,11 +800,16 @@ static int handle_host_perm_fault(struct kvm_cpu_context *host_ctxt, u64 esr, u64 addr)
return handled ? 0 : -EPERM;
}
+static bool is_dabt(u64 esr)
+{
+ return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_LOW;
+}
+
void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
{
struct kvm_vcpu_fault_info fault;
u64 esr, addr;
- int ret = 0;
+ int ret = -EPERM;
esr = read_sysreg_el2(SYS_ESR);
if (!__get_fault_info(esr, &fault)) {
@@ -817,7 +823,15 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
}
addr = (fault.hpfar_el2 & HPFAR_MASK) << 8;
- ret = host_stage2_idmap(addr);
+ addr |= fault.far_el2 & FAR_MASK;
+
+ if (is_dabt(esr) && !addr_is_memory(addr) &&
+ kvm_iommu_host_dabt_handler(host_ctxt, esr, addr))
+ goto return_to_host;
+
+ /* If not handled, attempt to map the page. */
+ if (ret == -EPERM)
+ ret = host_stage2_idmap(addr);
if ((esr & ESR_ELx_FSC_TYPE) == ESR_ELx_FSC_PERM)
ret = handle_host_perm_fault(host_ctxt, esr, addr);
@@ -827,6 +841,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
else
BUG_ON(ret && ret != -EAGAIN);
+return_to_host:
trace_host_mem_abort(esr, addr);
}
Soon, an SMMUv3 driver will be added, and it will need to emulate accesses
to some of its MMIO space. Add a DABT handler hook so that IOMMU drivers
can do so.

Signed-off-by: Mostafa Saleh <smostafa@google.com>
---
 arch/arm64/kvm/hyp/include/nvhe/iommu.h |  2 ++
 arch/arm64/kvm/hyp/nvhe/iommu/iommu.c   | 17 +++++++++++++++++
 arch/arm64/kvm/hyp/nvhe/mem_protect.c   | 19 +++++++++++++++++--
 3 files changed, 36 insertions(+), 2 deletions(-)
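
Not part of the patch, just an illustration: a minimal, hypothetical sketch
of how a driver could implement the new ->dabt_handler() hook to emulate
reads from a small MMIO window. EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE and
example_dabt_handler() are made-up placeholders; only the ESR_ELx_* field
macros, cpu_reg() and struct kvm_iommu_ops come from the existing arm64
headers and this patch.

#include <asm/esr.h>
#include <asm/kvm_hyp.h>

#include <nvhe/iommu.h>

#define EXAMPLE_MMIO_BASE	0x10000000UL	/* placeholder base address */
#define EXAMPLE_MMIO_SIZE	0x10000UL	/* placeholder window size */

static bool example_dabt_handler(struct kvm_cpu_context *host_ctxt,
				 u64 esr, u64 addr)
{
	unsigned int rd;

	/* Only claim faults that hit the emulated MMIO window. */
	if (addr < EXAMPLE_MMIO_BASE ||
	    addr >= EXAMPLE_MMIO_BASE + EXAMPLE_MMIO_SIZE)
		return false;

	/* Without a valid syndrome the access cannot be decoded. */
	if (!(esr & ESR_ELx_ISV))
		return false;

	/* This example only emulates reads; writes fall back to the default path. */
	if (esr & ESR_ELx_WNR)
		return false;

	/* Emulate the read: return 0 for every register in the window. */
	rd = (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
	if (rd != 31)
		cpu_reg(host_ctxt, rd) = 0;

	/* kvm_iommu_host_dabt_handler() skips the host instruction for us. */
	return true;
}

A driver would then set .dabt_handler = example_dabt_handler in its
struct kvm_iommu_ops, so that kvm_iommu_host_dabt_handler() can reach it
from handle_host_mem_abort().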