@@ -5,6 +5,8 @@
#include <linux/bitfield.h>
#include <linux/kvm_host.h>
+#include <asm/kvm_pgtable.h>
+
static inline bool nested_virt_in_use(const struct kvm_vcpu *vcpu)
{
return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
@@ -135,4 +137,10 @@ void access_nested_id_reg(struct kvm_vcpu *v, struct sys_reg_params *p,
#define KVM_NV_GUEST_MAP_SZ GENMASK_ULL(56, 55)
+static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
+{
+	return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
+}
+
#endif /* __ARM64_KVM_NESTED_H */
@@ -920,11 +920,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
* Potentially reduce shadow S2 permissions to match the guest's own
* S2. For exec faults, we'd only reach this point if the guest
* actually allowed it (see kvm_s2_handle_perm_fault).
+ *
+ * Also encode the level of the nested translation in the SW bits of
+ * the PTE/PMD/PUD. This will be retrieved on TLB invalidation from
+ * the guest.
*/
if (kvm_is_shadow_s2_fault(vcpu)) {
writable &= kvm_s2_trans_writable(nested);
if (!kvm_s2_trans_readable(nested))
prot &= ~KVM_PGTABLE_PROT_R;
+
+ prot |= kvm_encode_nested_level(nested);
}
spin_lock(&kvm->mmu_lock);
Populate bits [56:55] of the leaf entry with the level provided by the
guest's S2 translation.

Signed-off-by: Marc Zyngier <maz@kernel.org>
---
 arch/arm64/include/asm/kvm_nested.h | 8 ++++++++
 arch/arm64/kvm/mmu.c                | 6 ++++++
 2 files changed, 14 insertions(+)
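
Editor's note, not part of the patch: a minimal sketch of the read-back side
that the new comment in user_mem_abort() alludes to ("retrieved on TLB
invalidation from the guest"). It reuses the KVM_NV_GUEST_MAP_SZ mask from
the kvm_nested.h hunk above; the names kvm_decode_nested_level() and
nested_leaf_size() are hypothetical, a 4kB granule is assumed for the block
sizes, and the sketch only illustrates how FIELD_GET() recovers the level
that kvm_encode_nested_level() stored in bits [56:55] of the leaf entry.

/*
 * Illustrative sketch only, not part of this patch: recover the guest
 * S2 level that kvm_encode_nested_level() stored in bits [56:55].
 * kvm_decode_nested_level() and nested_leaf_size() are hypothetical
 * names; a 4kB granule is assumed for the block sizes below.
 */
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/sizes.h>
#include <linux/types.h>

#define KVM_NV_GUEST_MAP_SZ	GENMASK_ULL(56, 55)

static inline u8 kvm_decode_nested_level(u64 pte)
{
	return FIELD_GET(KVM_NV_GUEST_MAP_SZ, pte);
}

/*
 * On a guest S2 TLBI, the decoded level tells how big a range the
 * shadow leaf entry covers, so the invalidation can be done at the
 * matching granularity.
 */
static inline u64 nested_leaf_size(u64 pte)
{
	switch (kvm_decode_nested_level(pte)) {
	case 1:
		return SZ_1G;	/* level 1 block */
	case 2:
		return SZ_2M;	/* level 2 block */
	default:
		return SZ_4K;	/* level 3 page */
	}
}

Two software bits appear sufficient here because, with a 4kB granule, a
stage-2 leaf entry maps at levels 1 to 3, so the level always fits in the
[56:55] field without any extra shadow-PTE metadata.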