Message ID | 20200427043514.16144-8-tianjia.zhang@linux.alibaba.com
---|---
State | New, archived
Series | clean up redundant 'kvm_run' parameters
Reviewed-by: Huacai Chen <chenhc@lemote.com>

On Mon, Apr 27, 2020 at 12:35 PM Tianjia Zhang
<tianjia.zhang@linux.alibaba.com> wrote:
>
> In the current kvm version, 'kvm_run' has been included in the 'kvm_vcpu'
> structure. For historical reasons, many kvm-related function parameters
> retain the 'kvm_run' and 'kvm_vcpu' parameters at the same time. This
> patch does a unified cleanup of these remaining redundant parameters.
>
> Signed-off-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
> ---
>  arch/mips/include/asm/kvm_host.h |  4 ++--
>  arch/mips/kvm/entry.c            | 21 ++++++++-------------
>  arch/mips/kvm/mips.c             |  3 ++-
>  arch/mips/kvm/trap_emul.c        |  2 +-
>  arch/mips/kvm/vz.c               |  2 +-
>  5 files changed, 14 insertions(+), 18 deletions(-)
>
> diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
> index 971439297cea..db915c55166d 100644
> --- a/arch/mips/include/asm/kvm_host.h
> +++ b/arch/mips/include/asm/kvm_host.h
> @@ -310,7 +310,7 @@ struct kvm_mmu_memory_cache {
>  #define KVM_MIPS_GUEST_TLB_SIZE 64
>  struct kvm_vcpu_arch {
>         void *guest_ebase;
> -       int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
> +       int (*vcpu_run)(struct kvm_vcpu *vcpu);
>
>         /* Host registers preserved across guest mode execution */
>         unsigned long host_stack;
> @@ -821,7 +821,7 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
>  /* Debug: dump vcpu state */
>  int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
>
> -extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
> +extern int kvm_mips_handle_exit(struct kvm_vcpu *vcpu);
>
>  /* Building of entry/exception code */
>  int kvm_mips_entry_setup(void);
> diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
> index 16e1c93b484f..1083f35361ea 100644
> --- a/arch/mips/kvm/entry.c
> +++ b/arch/mips/kvm/entry.c
> @@ -204,7 +204,7 @@ static inline void build_set_exc_base(u32 **p, unsigned int reg)
>   * Assemble the start of the vcpu_run function to run a guest VCPU. The function
>   * conforms to the following prototype:
>   *
> - * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
> + * int vcpu_run(struct kvm_vcpu *vcpu);
>   *
>   * The exit from the guest and return to the caller is handled by the code
>   * generated by kvm_mips_build_ret_to_host().
> @@ -217,8 +217,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
>         unsigned int i;
>
>         /*
> -        * A0: run
> -        * A1: vcpu
> +        * A0: vcpu
>          */
>
>         /* k0/k1 not being used in host kernel context */
> @@ -237,10 +236,10 @@ void *kvm_mips_build_vcpu_run(void *addr)
>         kvm_mips_build_save_scratch(&p, V1, K1);
>
>         /* VCPU scratch register has pointer to vcpu */
> -       UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
> +       UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]);
>
>         /* Offset into vcpu->arch */
> -       UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
> +       UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch));
>
>         /*
>          * Save the host stack to VCPU, used for exception processing
> @@ -628,10 +627,7 @@ void *kvm_mips_build_exit(void *addr)
>         /* Now that context has been saved, we can use other registers */
>
>         /* Restore vcpu */
> -       UASM_i_MFC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
> -
> -       /* Restore run (vcpu->run) */
> -       UASM_i_LW(&p, S0, offsetof(struct kvm_vcpu, run), S1);
> +       UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
>
>         /*
>          * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
> @@ -793,7 +789,6 @@ void *kvm_mips_build_exit(void *addr)
>          * with this in the kernel
>          */
>         uasm_i_move(&p, A0, S0);
> -       uasm_i_move(&p, A1, S1);
>         UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
>         uasm_i_jalr(&p, RA, T9);
>         UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
> @@ -835,7 +830,7 @@ static void *kvm_mips_build_ret_from_exit(void *addr)
>          * guest, reload k1
>          */
>
> -       uasm_i_move(&p, K1, S1);
> +       uasm_i_move(&p, K1, S0);
>         UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
>
>         /*
> @@ -869,8 +864,8 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
>  {
>         u32 *p = addr;
>
> -       /* Put the saved pointer to vcpu (s1) back into the scratch register */
> -       UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
> +       /* Put the saved pointer to vcpu (s0) back into the scratch register */
> +       UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]);
>
>         /* Load up the Guest EBASE to minimize the window where BEV is set */
>         UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
> diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
> index 9710477a9827..32850470c037 100644
> --- a/arch/mips/kvm/mips.c
> +++ b/arch/mips/kvm/mips.c
> @@ -1186,8 +1186,9 @@ static void kvm_mips_set_c0_status(void)
>  /*
>   * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
>   */
> -int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
> +int kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
>  {
> +       struct kvm_run *run = vcpu->run;
>         u32 cause = vcpu->arch.host_cp0_cause;
>         u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
>         u32 __user *opc = (u32 __user *) vcpu->arch.pc;
> diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
> index d822f3aee3dc..04c864cc356a 100644
> --- a/arch/mips/kvm/trap_emul.c
> +++ b/arch/mips/kvm/trap_emul.c
> @@ -1238,7 +1238,7 @@ static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu)
>          */
>         kvm_mips_suspend_mm(cpu);
>
> -       r = vcpu->arch.vcpu_run(vcpu->run, vcpu);
> +       r = vcpu->arch.vcpu_run(vcpu);
>
>         /* We may have migrated while handling guest exits */
>         cpu = smp_processor_id();
> diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c
> index 94f1d23828e3..c5878fa0636d 100644
> --- a/arch/mips/kvm/vz.c
> +++ b/arch/mips/kvm/vz.c
> @@ -3152,7 +3152,7 @@ static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu)
>         kvm_vz_vcpu_load_tlb(vcpu, cpu);
>         kvm_vz_vcpu_load_wired(vcpu);
>
> -       r = vcpu->arch.vcpu_run(vcpu->run, vcpu);
> +       r = vcpu->arch.vcpu_run(vcpu);
>
>         kvm_vz_vcpu_save_wired(vcpu);
>
> --
> 2.17.1
>
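The cleanup applies the same pattern at every call site: drop the separate 'kvm_run' argument and, where the run area is still needed, fetch it through the vcpu. A minimal sketch of that pattern follows; the structures below are simplified stand-ins for illustration only, not the real declarations from the kernel headers.

/*
 * Illustrative sketch only: simplified stand-ins for the kernel structures.
 */
struct kvm_run {
        int exit_reason;
};

struct kvm_vcpu {
        struct kvm_run *run;    /* the run area is already reachable from the vcpu */
        /* ... arch state ... */
};

/*
 * Old shape:  int handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
 * New shape:  only the vcpu is passed; 'run' is derived locally when needed.
 */
static int handle_exit(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;

        /* the body keeps using 'run' exactly as it did before */
        return run->exit_reason;
}

The generated-assembly changes in entry.c are the same idea at the register level: with a single argument the vcpu pointer arrives in A0, and one callee-saved register (s0) is enough to carry it across the exit path, so the separate load of vcpu->run and the extra A1 move can be dropped.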