@@ -977,14 +977,14 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->vcpu.arch.hflags = 0;
- set_cr_intercept(svm, INTERCEPT_CR0_READ);
- set_cr_intercept(svm, INTERCEPT_CR3_READ);
- set_cr_intercept(svm, INTERCEPT_CR4_READ);
- set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
- set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
- set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
+ set_intercept(svm, INTERCEPT_CR0_READ);
+ set_intercept(svm, INTERCEPT_CR3_READ);
+ set_intercept(svm, INTERCEPT_CR4_READ);
+ set_intercept(svm, INTERCEPT_CR0_WRITE);
+ set_intercept(svm, INTERCEPT_CR3_WRITE);
+ set_intercept(svm, INTERCEPT_CR4_WRITE);
if (!kvm_vcpu_apicv_active(&svm->vcpu))
- set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+ set_intercept(svm, INTERCEPT_CR8_WRITE);
set_dr_intercepts(svm);
@@ -1079,8 +1079,8 @@ static void init_vmcb(struct vcpu_svm *svm)
control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
clr_intercept(svm, INTERCEPT_INVLPG);
clr_exception_intercept(svm, INTERCEPT_PF_VECTOR);
- clr_cr_intercept(svm, INTERCEPT_CR3_READ);
- clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
+ clr_intercept(svm, INTERCEPT_CR3_READ);
+ clr_intercept(svm, INTERCEPT_CR3_WRITE);
save->g_pat = svm->vcpu.arch.pat;
save->cr3 = 0;
save->cr4 = 0;
@@ -1534,11 +1534,11 @@ static void update_cr0_intercept(struct vcpu_svm *svm)
mark_dirty(svm->vmcb, VMCB_CR);
if (gcr0 == *hcr0) {
- clr_cr_intercept(svm, INTERCEPT_CR0_READ);
- clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
+ clr_intercept(svm, INTERCEPT_CR0_READ);
+ clr_intercept(svm, INTERCEPT_CR0_WRITE);
} else {
- set_cr_intercept(svm, INTERCEPT_CR0_READ);
- set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
+ set_intercept(svm, INTERCEPT_CR0_READ);
+ set_intercept(svm, INTERCEPT_CR0_WRITE);
}
}
@@ -2916,7 +2916,7 @@ static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
- if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
+ if (!is_intercept(svm, INTERCEPT_CR0_WRITE))
vcpu->arch.cr0 = svm->vmcb->save.cr0;
if (npt_enabled)
vcpu->arch.cr3 = svm->vmcb->save.cr3;
@@ -3042,13 +3042,13 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
if (svm_nested_virtualize_tpr(vcpu))
return;
- clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+ clr_intercept(svm, INTERCEPT_CR8_WRITE);
if (irr == -1)
return;
if (tpr >= irr)
- set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+ set_intercept(svm, INTERCEPT_CR8_WRITE);
}
bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
@@ -3236,7 +3236,7 @@ static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
if (svm_nested_virtualize_tpr(vcpu))
return;
- if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
+ if (!is_intercept(svm, INTERCEPT_CR8_WRITE)) {
int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
kvm_set_cr8(vcpu, cr8);
}
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -231,31 +231,6 @@ static inline bool __is_intercept(void *addr, int bit)
return test_bit(bit, (unsigned long *)addr);
}
-static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
-{
- struct vmcb *vmcb = get_host_vmcb(svm);
-
- __set_intercept(&vmcb->control.intercepts, bit);
-
- recalc_intercepts(svm);
-}
-
-static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
-{
- struct vmcb *vmcb = get_host_vmcb(svm);
-
- __clr_intercept(&vmcb->control.intercepts, bit);
-
- recalc_intercepts(svm);
-}
-
-static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
-{
- struct vmcb *vmcb = get_host_vmcb(svm);
-
- return __is_intercept(&vmcb->control.intercepts, bit);
-}
-
static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
struct vmcb *vmcb = get_host_vmcb(svm);
Remove set_cr_intercept, clr_cr_intercept and is_cr_intercept. Instead,
call the generic set_intercept, clr_intercept and is_intercept helpers
for all CR intercepts.

Signed-off-by: Babu Moger <babu.moger@amd.com>
---
 arch/x86/kvm/svm/svm.c | 34 +++++++++++++++++-----------------
 arch/x86/kvm/svm/svm.h | 25 -------------------------
 2 files changed, 17 insertions(+), 42 deletions(-)
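
Not part of the patch, just for illustration: below is a minimal,
self-contained userspace sketch of the pattern the generic
set_intercept/clr_intercept/is_intercept helpers rely on, namely that
every intercept (CR accesses included) is addressed by a single bit
number in one flat intercept vector. The struct, enum values and helper
names are invented for the example and are not the kernel definitions.

/*
 * Illustration only -- not kernel code.  All names here are invented.
 * The idea: one flat bit vector holds every intercept, so one generic
 * set/clear/test helper covers CR intercepts and everything else.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_WORD 32u
#define MAX_INTERCEPT_WORDS 5

/* Hypothetical bit numbers, chosen only for this example. */
enum {
	EX_INTERCEPT_CR0_READ  = 0,
	EX_INTERCEPT_CR0_WRITE = 16,
	EX_INTERCEPT_CR8_WRITE = 24,
};

struct ex_control {
	uint32_t intercepts[MAX_INTERCEPT_WORDS];	/* flat intercept vector */
};

static void ex_set_intercept(struct ex_control *c, unsigned int bit)
{
	c->intercepts[bit / BITS_PER_WORD] |= 1u << (bit % BITS_PER_WORD);
}

static void ex_clr_intercept(struct ex_control *c, unsigned int bit)
{
	c->intercepts[bit / BITS_PER_WORD] &= ~(1u << (bit % BITS_PER_WORD));
}

static bool ex_is_intercept(const struct ex_control *c, unsigned int bit)
{
	return c->intercepts[bit / BITS_PER_WORD] & (1u << (bit % BITS_PER_WORD));
}

int main(void)
{
	struct ex_control c = { { 0 } };

	ex_set_intercept(&c, EX_INTERCEPT_CR0_WRITE);
	printf("CR0 write intercepted: %d\n",
	       ex_is_intercept(&c, EX_INTERCEPT_CR0_WRITE));

	ex_clr_intercept(&c, EX_INTERCEPT_CR0_WRITE);
	printf("CR0 write intercepted: %d\n",
	       ex_is_intercept(&c, EX_INTERCEPT_CR0_WRITE));

	return 0;
}

The removed CR-specific wrappers above did the same bit manipulation on
vmcb->control.intercepts via __set_intercept()/__clr_intercept()/
__is_intercept() and then called recalc_intercepts(); presumably the
generic helpers do exactly that as well, which is what makes the
CR-specific copies redundant.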