Add another function for getting the memory type mask to x86_ops.
This version of the function can fail, but it does not require a
vCPU pointer. It will be used in a subsequent commit for in-place
large page promotion when disabling dirty logging.

No functional change intended.

Signed-off-by: Ben Gardon <bgardon@google.com>
---
 arch/x86/include/asm/kvm-x86-ops.h | 1 +
 arch/x86/include/asm/kvm_host.h    | 2 ++
 arch/x86/kvm/svm/svm.c             | 8 ++++++++
 arch/x86/kvm/vmx/vmx.c             | 1 +
 4 files changed, 12 insertions(+)

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -84,6 +84,7 @@ KVM_X86_OP_NULL(sync_pir_to_irr)
 KVM_X86_OP(set_tss_addr)
 KVM_X86_OP(set_identity_map_addr)
 KVM_X86_OP(get_mt_mask)
+KVM_X86_OP(try_get_mt_mask)
 KVM_X86_OP(load_mmu_pgd)
 KVM_X86_OP_NULL(has_wbinvd_exit)
 KVM_X86_OP(get_l2_tsc_offset)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1400,6 +1400,8 @@ struct kvm_x86_ops {
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+	bool (*try_get_mt_mask)(struct kvm *kvm, gfn_t gfn,
+				bool is_mmio, u64 *mask);
 	void (*load_mmu_pgd)(struct kvm_vcpu *vcpu, hpa_t root_hpa,
 			     int root_level);
 
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4067,6 +4067,13 @@ static bool svm_has_emulated_msr(struct kvm *kvm, u32 index)
 	return true;
 }
 
+static bool svm_try_get_mt_mask(struct kvm *kvm, gfn_t gfn,
+				bool is_mmio, u64 *mask)
+{
+	*mask = 0;
+	return true;
+}
+
 static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 {
 	return 0;
@@ -4660,6 +4667,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.set_tss_addr = svm_set_tss_addr,
 	.set_identity_map_addr = svm_set_identity_map_addr,
 	.get_mt_mask = svm_get_mt_mask,
+	.try_get_mt_mask = svm_try_get_mt_mask,
 
 	.get_exit_info = svm_get_exit_info,
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7658,6 +7658,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 	.set_tss_addr = vmx_set_tss_addr,
 	.set_identity_map_addr = vmx_set_identity_map_addr,
 	.get_mt_mask = vmx_get_mt_mask,
+	.try_get_mt_mask = vmx_try_get_mt_mask,
 
 	.get_exit_info = vmx_get_exit_info,
 
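For illustration only, not part of the patch: a sketch of how a
VM-scoped caller could use the new hook. KVM_X86_OP(try_get_mt_mask)
generates a static call named kvm_x86_try_get_mt_mask, so the hook is
invoked via static_call(); the helper below and its name are
hypothetical, since the real caller only lands in the subsequent
large page promotion commit.

	/*
	 * Hypothetical caller: compute the memory type for a gfn from a
	 * VM-scoped path where no vCPU is available, e.g. when disabling
	 * dirty logging. Returns false if the backend cannot determine
	 * the memory type without a vCPU, in which case the caller must
	 * skip the promotion rather than guess.
	 */
	static bool try_promotion_mt_mask(struct kvm *kvm, gfn_t gfn,
					  u64 *mt_mask)
	{
		return static_call(kvm_x86_try_get_mt_mask)(kvm, gfn,
							    false /* is_mmio */,
							    mt_mask);
	}

The SVM implementation can unconditionally succeed with a mask of 0
because, as the svm_get_mt_mask() stub above shows, SVM never sets
memory type bits in its shadow/NPT entries the way VMX does for EPT.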