@@ -317,7 +317,7 @@ struct kvm_vcpu_arch {
unsigned int aux_inuse;

/* COP0 State */
- struct mips_coproc *cop0;
+ struct mips_coproc cop0;

/* Resume PC after MMIO completion */
unsigned long io_pc;
@@ -698,7 +698,7 @@ static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
static inline bool kvm_mips_guest_has_fpu(struct kvm_vcpu_arch *vcpu)
{
return kvm_mips_guest_can_have_fpu(vcpu) &&
- kvm_read_c0_guest_config1(vcpu->cop0) & MIPS_CONF1_FP;
+ kvm_read_c0_guest_config1(&vcpu->cop0) & MIPS_CONF1_FP;
}

static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
@@ -710,7 +710,7 @@ static inline bool kvm_mips_guest_can_have_msa(struct kvm_vcpu_arch *vcpu)
static inline bool kvm_mips_guest_has_msa(struct kvm_vcpu_arch *vcpu)
{
return kvm_mips_guest_can_have_msa(vcpu) &&
- kvm_read_c0_guest_config3(vcpu->cop0) & MIPS_CONF3_MSA;
+ kvm_read_c0_guest_config3(&vcpu->cop0) & MIPS_CONF3_MSA;
}

struct kvm_mips_callbacks {
@@ -312,7 +312,7 @@ int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
*/
int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;

return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
@@ -384,7 +384,7 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
*/
static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
ktime_t expires, threshold;
u32 count, compare;
int running;
@@ -444,7 +444,7 @@ static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
*/
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;

/* If count disabled just read static copy of count */
if (kvm_mips_count_disabled(vcpu))
@@ -502,7 +502,7 @@ ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
ktime_t now, u32 count)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 compare;
u64 delta;
ktime_t expire;
@@ -603,7 +603,7 @@ int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
*/
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
ktime_t now;

/* Calculate bias */
@@ -649,7 +649,7 @@ void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
*/
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
int dc;
ktime_t now;
u32 count;
@@ -696,7 +696,7 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
*/
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
int dc;
u32 old_compare = kvm_read_c0_guest_compare(cop0);
s32 delta = compare - old_compare;
@@ -779,7 +779,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
*/
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 count;
ktime_t now;

@@ -806,7 +806,7 @@ static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
*/
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;

kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
@@ -826,7 +826,7 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
*/
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 count;

kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
@@ -852,7 +852,7 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
*/
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
s64 changed = count_ctl ^ vcpu->arch.count_ctl;
s64 delta;
ktime_t expire, now;
@@ -649,7 +649,7 @@ static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
int ret;
s64 v;
@@ -761,7 +761,7 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
s64 v;
s64 vs[2];
@@ -1086,7 +1086,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
return kvm_mips_pending_timer(vcpu) ||
- kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI;
+ kvm_read_c0_guest_cause(&vcpu->arch.cop0) & C_TI;
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
@@ -1110,7 +1110,7 @@ int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);
- cop0 = vcpu->arch.cop0;
+ cop0 = &vcpu->arch.cop0;
kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
kvm_read_c0_guest_status(cop0),
kvm_read_c0_guest_cause(cop0));
@@ -1232,7 +1232,7 @@ static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)

case EXCCODE_TLBS:
kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
- cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
+ cause, kvm_read_c0_guest_status(&vcpu->arch.cop0), opc,
badvaddr);

++vcpu->stat.tlbmiss_st_exits;
@@ -1304,7 +1304,7 @@ static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
kvm_get_badinstr(opc, vcpu, &inst);
kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
exccode, opc, inst, badvaddr,
- kvm_read_c0_guest_status(vcpu->arch.cop0));
+ kvm_read_c0_guest_status(&vcpu->arch.cop0));
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
@@ -1377,7 +1377,7 @@ int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu)
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int sr, cfg5;

preempt_disable();
@@ -1421,7 +1421,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int sr, cfg5;

preempt_disable();
@@ -322,11 +322,11 @@ TRACE_EVENT_FN(kvm_guest_mode_change,
),

TP_fast_assign(
- __entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0);
+ __entry->epc = kvm_read_c0_guest_epc(&vcpu->arch.cop0);
__entry->pc = vcpu->arch.pc;
- __entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0);
- __entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0);
- __entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0);
+ __entry->badvaddr = kvm_read_c0_guest_badvaddr(&vcpu->arch.cop0);
+ __entry->status = kvm_read_c0_guest_status(&vcpu->arch.cop0);
+ __entry->cause = kvm_read_c0_guest_cause(&vcpu->arch.cop0);
),

TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx",
@@ -422,7 +422,7 @@ static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
*/
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 cause, compare;

compare = kvm_read_sw_gc0_compare(cop0);
@@ -517,7 +517,7 @@ static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
*/
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
u32 gctl0, compare, cause;

gctl0 = read_c0_guestctl0();
@@ -863,7 +863,7 @@ static unsigned long mips_process_maar(unsigned int op, unsigned long val)

static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;

val &= MIPS_MAARI_INDEX;
if (val == MIPS_MAARI_INDEX)
@@ -876,7 +876,7 @@ static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
u32 *opc, u32 cause,
struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
u32 rt, rd, sel;
unsigned long curr_pc;
@@ -1911,7 +1911,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 *v)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int idx;

switch (reg->id) {
@@ -2081,7 +2081,7 @@ static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_MAARI:
if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
return -EINVAL;
- *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
+ *v = kvm_read_sw_gc0_maari(&vcpu->arch.cop0);
break;
#ifdef CONFIG_64BIT
case KVM_REG_MIPS_CP0_XCONTEXT:
@@ -2135,7 +2135,7 @@ static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 v)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned int idx;
int ret = 0;
unsigned int cur, change;
@@ -2562,7 +2562,7 @@ static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)

static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
bool migrated, all;

/*
@@ -2704,7 +2704,7 @@ static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)

static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;

if (current->flags & PF_VCPU)
kvm_vz_vcpu_save_wired(vcpu);
@@ -3076,7 +3076,7 @@ static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)

static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
+ struct mips_coproc *cop0 = &vcpu->arch.cop0;
unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */

/*
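Taken together, the hunks above make one mechanical change: struct kvm_vcpu_arch now holds its COP0 register state by value (struct mips_coproc cop0) instead of through a pointer (struct mips_coproc *cop0), so every accessor that was handed vcpu->arch.cop0 (or vcpu->cop0 in the inline helpers in the first hunks, where vcpu is a struct kvm_vcpu_arch pointer) is now handed &vcpu->arch.cop0. Below is a minimal sketch of that before/after pattern; the structure layout and the read_guest_cause() helper are simplified stand-ins, not the real kvm_read_c0_guest_*/kvm_read_sw_gc0_* accessor macros used in the diff.

/* Illustrative sketch only; layout and helper are assumptions, not kernel code. */
struct mips_coproc {
	unsigned long reg[32][8];	/* CP0 register file, indexed [register][select] */
};

struct kvm_vcpu_arch {
	/* before: struct mips_coproc *cop0;  (state reached through a pointer) */
	struct mips_coproc cop0;	/* after: state embedded directly in the vcpu */
};

/* Call sites that passed vcpu->arch.cop0 now pass &vcpu->arch.cop0. */
static unsigned long read_guest_cause(struct kvm_vcpu_arch *arch)
{
	struct mips_coproc *cop0 = &arch->cop0;	/* was: arch->cop0 */

	return cop0->reg[13][0];	/* CP0 Cause is register 13, select 0 */
}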