| Message ID | 5124C99C.5090702@siemens.com (mailing list archive) |
|---|---|
| State | New, archived |
| Headers | show |
On Wed, Feb 20, 2013 at 02:03:24PM +0100, Jan Kiszka wrote: > Cleanup: __vmx_complete_interrupts has no use for the vmx structure. > > Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com> Fixed it to be applicable without "nVMX: Rework event injection and recovery" and applied. Thanks. > --- > > Note: this applies on top of "Rework event injection and recovery" > > arch/x86/kvm/vmx.c | 31 ++++++++++++++----------------- > 1 files changed, 14 insertions(+), 17 deletions(-) > > diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c > index d99a519..d6ea4a7 100644 > --- a/arch/x86/kvm/vmx.c > +++ b/arch/x86/kvm/vmx.c > @@ -6430,7 +6430,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) > ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); > } > > -static void __vmx_complete_interrupts(struct vcpu_vmx *vmx, > +static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, > u32 idt_vectoring_info, > int instr_len_field, > int error_code_field) > @@ -6441,46 +6441,43 @@ static void __vmx_complete_interrupts(struct vcpu_vmx *vmx, > > idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; > > - vmx->vcpu.arch.nmi_injected = false; > - kvm_clear_exception_queue(&vmx->vcpu); > - kvm_clear_interrupt_queue(&vmx->vcpu); > + vcpu->arch.nmi_injected = false; > + kvm_clear_exception_queue(vcpu); > + kvm_clear_interrupt_queue(vcpu); > > if (!idtv_info_valid) > return; > > - kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); > + kvm_make_request(KVM_REQ_EVENT, vcpu); > > vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; > type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; > > switch (type) { > case INTR_TYPE_NMI_INTR: > - vmx->vcpu.arch.nmi_injected = true; > + vcpu->arch.nmi_injected = true; > /* > * SDM 3: 27.7.1.2 (September 2008) > * Clear bit "block by NMI" before VM entry if a NMI > * delivery faulted. 
> */ > - vmx_set_nmi_mask(&vmx->vcpu, false); > + vmx_set_nmi_mask(vcpu, false); > break; > case INTR_TYPE_SOFT_EXCEPTION: > - vmx->vcpu.arch.event_exit_inst_len = > - vmcs_read32(instr_len_field); > + vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); > /* fall through */ > case INTR_TYPE_HARD_EXCEPTION: > if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { > u32 err = vmcs_read32(error_code_field); > - kvm_queue_exception_e(&vmx->vcpu, vector, err); > + kvm_queue_exception_e(vcpu, vector, err); > } else > - kvm_queue_exception(&vmx->vcpu, vector); > + kvm_queue_exception(vcpu, vector); > break; > case INTR_TYPE_SOFT_INTR: > - vmx->vcpu.arch.event_exit_inst_len = > - vmcs_read32(instr_len_field); > + vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); > /* fall through */ > case INTR_TYPE_EXT_INTR: > - kvm_queue_interrupt(&vmx->vcpu, vector, > - type == INTR_TYPE_SOFT_INTR); > + kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); > break; > default: > break; > @@ -6489,7 +6486,7 @@ static void __vmx_complete_interrupts(struct vcpu_vmx *vmx, > > static void vmx_complete_interrupts(struct vcpu_vmx *vmx) > { > - __vmx_complete_interrupts(vmx, vmx->idt_vectoring_info, > + __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, > VM_EXIT_INSTRUCTION_LEN, > IDT_VECTORING_ERROR_CODE); > } > @@ -6498,7 +6495,7 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu) > { > if (to_vmx(vcpu)->nested.nested_run_pending) > return; > - __vmx_complete_interrupts(to_vmx(vcpu), > + __vmx_complete_interrupts(vcpu, > vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), > VM_ENTRY_INSTRUCTION_LEN, > VM_ENTRY_EXCEPTION_ERROR_CODE); > -- > 1.7.3.4 -- Gleb. -- To unsubscribe from this list: send the line "unsubscribe kvm" in the body of a message to majordomo@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index d99a519..d6ea4a7 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -6430,7 +6430,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx) ktime_to_ns(ktime_sub(ktime_get(), vmx->entry_time)); } -static void __vmx_complete_interrupts(struct vcpu_vmx *vmx, +static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu, u32 idt_vectoring_info, int instr_len_field, int error_code_field) @@ -6441,46 +6441,43 @@ static void __vmx_complete_interrupts(struct vcpu_vmx *vmx, idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK; - vmx->vcpu.arch.nmi_injected = false; - kvm_clear_exception_queue(&vmx->vcpu); - kvm_clear_interrupt_queue(&vmx->vcpu); + vcpu->arch.nmi_injected = false; + kvm_clear_exception_queue(vcpu); + kvm_clear_interrupt_queue(vcpu); if (!idtv_info_valid) return; - kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu); + kvm_make_request(KVM_REQ_EVENT, vcpu); vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK; type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK; switch (type) { case INTR_TYPE_NMI_INTR: - vmx->vcpu.arch.nmi_injected = true; + vcpu->arch.nmi_injected = true; /* * SDM 3: 27.7.1.2 (September 2008) * Clear bit "block by NMI" before VM entry if a NMI * delivery faulted. 
*/ - vmx_set_nmi_mask(&vmx->vcpu, false); + vmx_set_nmi_mask(vcpu, false); break; case INTR_TYPE_SOFT_EXCEPTION: - vmx->vcpu.arch.event_exit_inst_len = - vmcs_read32(instr_len_field); + vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); /* fall through */ case INTR_TYPE_HARD_EXCEPTION: if (idt_vectoring_info & VECTORING_INFO_DELIVER_CODE_MASK) { u32 err = vmcs_read32(error_code_field); - kvm_queue_exception_e(&vmx->vcpu, vector, err); + kvm_queue_exception_e(vcpu, vector, err); } else - kvm_queue_exception(&vmx->vcpu, vector); + kvm_queue_exception(vcpu, vector); break; case INTR_TYPE_SOFT_INTR: - vmx->vcpu.arch.event_exit_inst_len = - vmcs_read32(instr_len_field); + vcpu->arch.event_exit_inst_len = vmcs_read32(instr_len_field); /* fall through */ case INTR_TYPE_EXT_INTR: - kvm_queue_interrupt(&vmx->vcpu, vector, - type == INTR_TYPE_SOFT_INTR); + kvm_queue_interrupt(vcpu, vector, type == INTR_TYPE_SOFT_INTR); break; default: break; @@ -6489,7 +6486,7 @@ static void __vmx_complete_interrupts(struct vcpu_vmx *vmx, static void vmx_complete_interrupts(struct vcpu_vmx *vmx) { - __vmx_complete_interrupts(vmx, vmx->idt_vectoring_info, + __vmx_complete_interrupts(&vmx->vcpu, vmx->idt_vectoring_info, VM_EXIT_INSTRUCTION_LEN, IDT_VECTORING_ERROR_CODE); } @@ -6498,7 +6495,7 @@ static void vmx_cancel_injection(struct kvm_vcpu *vcpu) { if (to_vmx(vcpu)->nested.nested_run_pending) return; - __vmx_complete_interrupts(to_vmx(vcpu), + __vmx_complete_interrupts(vcpu, vmcs_read32(VM_ENTRY_INTR_INFO_FIELD), VM_ENTRY_INSTRUCTION_LEN, VM_ENTRY_EXCEPTION_ERROR_CODE);
Cleanup: __vmx_complete_interrupts has no use for the vmx structure. Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com> --- Note: this applies on top of "Rework event injection and recovery" arch/x86/kvm/vmx.c | 31 ++++++++++++++----------------- 1 files changed, 14 insertions(+), 17 deletions(-)