@@ -32,6 +32,7 @@ static int __p2m_get_mem_access(struct domain *d, gfn_t gfn,
ACCESS(rwx),
ACCESS(rx2rw),
ACCESS(n2rwx),
+ ACCESS(r_pw),
#undef ACCESS
};
@@ -172,6 +173,7 @@ p2m_mem_access_check_and_get_page(vaddr_t gva, unsigned long flag,
break;
else
goto err;
+ case XENMEM_access_r_pw:
case XENMEM_access_rx2rw:
case XENMEM_access_rx:
case XENMEM_access_r:
@@ -253,6 +255,7 @@ bool p2m_mem_access_check(paddr_t gpa, vaddr_t gla, const struct npfec npfec)
violation = npfec.read_access || npfec.insn_fetch;
break;
case XENMEM_access_r:
+ case XENMEM_access_r_pw:
violation = npfec.write_access || npfec.insn_fetch;
break;
default:
@@ -361,6 +364,7 @@ long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
ACCESS(rwx),
ACCESS(rx2rw),
ACCESS(n2rwx),
+ ACCESS(r_pw),
#undef ACCESS
};
@@ -597,6 +597,7 @@ static void p2m_set_permission(lpae_t *e, p2m_type_t t, p2m_access_t a)
e->p2m.read = 0;
break;
case p2m_access_r:
+ case p2m_access_r_pw:
e->p2m.write = 0;
e->p2m.xn = 1;
break;
@@ -1897,6 +1897,7 @@ int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
violation = npfec.read_access || npfec.write_access || npfec.insn_fetch;
break;
case p2m_access_r:
+ case p2m_access_r_pw:
violation = npfec.write_access || npfec.insn_fetch;
break;
case p2m_access_w:
@@ -295,6 +295,7 @@ bool hvm_monitor_check_p2m(unsigned long gla, gfn_t gfn, uint32_t pfec,
case XENMEM_access_r:
case XENMEM_access_n:
+ case XENMEM_access_r_pw:
if ( pfec & PFEC_write_access )
req.u.mem_access.flags |= MEM_ACCESS_R | MEM_ACCESS_W;
if ( pfec & PFEC_insn_fetch )
@@ -203,6 +203,7 @@ static void __init vmx_display_features(void)
P(cpu_has_vmx_bus_lock_detection, "Bus Lock Detection");
P(cpu_has_vmx_notify_vm_exiting, "Notify VM Exit");
P(cpu_has_vmx_virt_spec_ctrl, "Virtualize SPEC_CTRL");
+ P(cpu_has_vmx_ept_paging_write, "EPT Paging-Write");
#undef P
if ( !printed )
@@ -366,7 +367,8 @@ static int vmx_init_vmcs_config(bool bsp)
if ( _vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_TERTIARY_CONTROLS )
{
- uint64_t opt = TERTIARY_EXEC_VIRT_SPEC_CTRL;
+ uint64_t opt = (TERTIARY_EXEC_VIRT_SPEC_CTRL |
+ TERTIARY_EXEC_EPT_PAGING_WRITE);
_vmx_tertiary_exec_control = adjust_vmx_controls2(
"Tertiary Exec Control", 0, opt,
@@ -273,6 +273,9 @@ extern uint64_t vmx_tertiary_exec_control;
#define cpu_has_vmx_virt_spec_ctrl \
(vmx_tertiary_exec_control & TERTIARY_EXEC_VIRT_SPEC_CTRL)
+#define cpu_has_vmx_ept_paging_write \
+ (vmx_tertiary_exec_control & TERTIARY_EXEC_EPT_PAGING_WRITE)
+
#define VMX_EPT_EXEC_ONLY_SUPPORTED 0x00000001
#define VMX_EPT_WALK_LENGTH_4_SUPPORTED 0x00000040
#define VMX_EPT_MEMORY_TYPE_UC 0x00000100
@@ -980,6 +980,7 @@ static inline unsigned int p2m_access_to_iommu_flags(p2m_access_t p2ma)
case p2m_access_r:
case p2m_access_rx:
case p2m_access_rx2rw:
+ case p2m_access_r_pw:
return IOMMUF_readable;
case p2m_access_w:
@@ -213,6 +213,9 @@ int nestedhvm_hap_nested_page_fault(
case p2m_access_n2rwx:
p2ma_10 = p2m_access_n;
break;
+ case p2m_access_r_pw:
+ p2ma_10 = p2m_access_r;
+ break;
default:
p2ma_10 = p2m_access_n;
/* For safety, remove all permissions. */
@@ -45,6 +45,7 @@ static int _p2m_get_mem_access(struct p2m_domain *p2m, gfn_t gfn,
ACCESS(rwx),
ACCESS(rx2rw),
ACCESS(n2rwx),
+ ACCESS(r_pw),
#undef ACCESS
};
@@ -94,6 +95,7 @@ bool p2m_mem_access_emulate_check(struct vcpu *v,
break;
case XENMEM_access_r:
+ case XENMEM_access_r_pw:
violation = data->flags & MEM_ACCESS_WX;
break;
@@ -312,6 +314,7 @@ bool xenmem_access_to_p2m_access(const struct p2m_domain *p2m,
ACCESS(rwx),
ACCESS(rx2rw),
ACCESS(n2rwx),
+ ACCESS(r_pw),
#undef ACCESS
};
@@ -176,6 +176,10 @@ static void ept_p2m_type_to_flags(const struct p2m_domain *p2m,
break;
case p2m_access_rwx:
break;
+ case p2m_access_r_pw:
+ entry->w = entry->x = 0;
+ entry->pw = !!cpu_has_vmx_ept_paging_write;
+ break;
}
/*
@@ -426,6 +426,15 @@ typedef enum {
* pausing the vcpu
*/
XENMEM_access_n2rwx,
+
+ /*
+     * Same as XENMEM_access_r, but on processors that
+     * support TERTIARY_EXEC_EPT_PAGING_WRITE,
+     * CPU-initiated page-table walks can still
+     * write to it (e.g., to update A/D bits).
+ */
+ XENMEM_access_r_pw,
+
/* Take the domain default */
XENMEM_access_default
} xenmem_access_t;
@@ -64,6 +64,12 @@ typedef enum {
* generates an event but does not pause the
* vcpu */
+    p2m_access_r_pw = 10, /* Special: same as R, but on processors that
+                           * support TERTIARY_EXEC_EPT_PAGING_WRITE,
+                           * CPU-initiated page-table walks can still
+                           * write to it (e.g., to update A/D bits).
+ */
+
/* NOTE: Assumed to be only 4 bits right now on x86. */
} p2m_access_t;