@@ -470,33 +470,34 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
{
int ret;
struct vmcb *vmcb12;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
struct kvm_host_map map;
u64 vmcb12_gpa;
- if (is_smm(&svm->vcpu)) {
- kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ if (is_smm(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
- vmcb12_gpa = svm->vmcb->save.rax;
- ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
+ vmcb12_gpa = vmcb->save.rax;
+ ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
if (ret == -EINVAL) {
- kvm_inject_gp(&svm->vcpu, 0);
+ kvm_inject_gp(vcpu, 0);
return 1;
} else if (ret) {
- return kvm_skip_emulated_instruction(&svm->vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
}
- ret = kvm_skip_emulated_instruction(&svm->vcpu);
+ ret = kvm_skip_emulated_instruction(vcpu);
vmcb12 = map.hva;
if (WARN_ON_ONCE(!svm->nested.initialized))
return -EINVAL;
- if (!nested_vmcb_checks(&svm->vcpu, vmcb12)) {
+ if (!nested_vmcb_checks(vcpu, vmcb12)) {
vmcb12->control.exit_code = SVM_EXIT_ERR;
vmcb12->control.exit_code_hi = 0;
vmcb12->control.exit_info_1 = 0;
@@ -504,7 +505,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
goto out;
}
- trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
+ trace_kvm_nested_vmrun(vmcb->save.rip, vmcb12_gpa,
vmcb12->save.rip,
vmcb12->control.int_ctl,
vmcb12->control.event_inj,
@@ -518,8 +519,8 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
vmcb12->control.intercepts[INTERCEPT_WORD5]);
/* Clear internal status */
- kvm_clear_exception_queue(&svm->vcpu);
- kvm_clear_interrupt_queue(&svm->vcpu);
+ kvm_clear_exception_queue(vcpu);
+ kvm_clear_interrupt_queue(vcpu);
/*
* Save the old vmcb, so we don't need to pick what we save, but can
@@ -531,17 +532,17 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
hsave->save.ds = vmcb->save.ds;
hsave->save.gdtr = vmcb->save.gdtr;
hsave->save.idtr = vmcb->save.idtr;
- hsave->save.efer = svm->vcpu.arch.efer;
- hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
- hsave->save.cr4 = svm->vcpu.arch.cr4;
- hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
- hsave->save.rip = kvm_rip_read(&svm->vcpu);
+ hsave->save.efer = vcpu->arch.efer;
+ hsave->save.cr0 = kvm_read_cr0(vcpu);
+ hsave->save.cr4 = vcpu->arch.cr4;
+ hsave->save.rflags = kvm_get_rflags(vcpu);
+ hsave->save.rip = kvm_rip_read(vcpu);
hsave->save.rsp = vmcb->save.rsp;
hsave->save.rax = vmcb->save.rax;
if (npt_enabled)
hsave->save.cr3 = vmcb->save.cr3;
else
- hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
+ hsave->save.cr3 = kvm_read_cr3(vcpu);
copy_vmcb_control_area(&hsave->control, &vmcb->control);
@@ -556,15 +557,15 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
out_exit_err:
svm->nested.nested_run_pending = 0;
- svm->vmcb->control.exit_code = SVM_EXIT_ERR;
- svm->vmcb->control.exit_code_hi = 0;
- svm->vmcb->control.exit_info_1 = 0;
- svm->vmcb->control.exit_info_2 = 0;
+ vmcb->control.exit_code = SVM_EXIT_ERR;
+ vmcb->control.exit_code_hi = 0;
+ vmcb->control.exit_info_1 = 0;
+ vmcb->control.exit_info_2 = 0;
nested_svm_vmexit(svm);
out:
- kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ kvm_vcpu_unmap(vcpu, &map, true);
return ret;
}
Use local variables to dereference svm->vcpu and svm->vmcb as they make the code tidier. Suggested-by: Sean Christopherson <seanjc@google.com> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com> --- arch/x86/kvm/svm/nested.c | 45 ++++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 22 deletions(-)