@@ -449,35 +449,34 @@ static u32 group2_table[] = {
/* Raw emulation: instruction has two explicit operands. */
-#define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
- do { \
- unsigned long _tmp; \
- \
- switch ((_dst).bytes) { \
- case 2: \
- ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
- break; \
- case 4: \
- ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \
- break; \
- case 8: \
- ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \
- break; \
- } \
+#define __emulate_2op_nobyte(_op, _src, _dst, _eflags, _wx, _wy, _lx, _ly, _qx, _qy) \
+ do { \
+ unsigned long _tmp; \
+ switch ((_dst).bytes) { \
+ case 2: \
+ ____emulate_2op(_op, _src, _dst, _eflags, _wx, _wy, "w"); \
+ break; \
+ case 4: \
+ ____emulate_2op(_op, _src, _dst, _eflags, _lx, _ly, "l"); \
+ break; \
+ case 8: \
+ ON64(____emulate_2op(_op, _src, _dst, _eflags, _qx, _qy, "q")); \
+ break; \
+ } \
} while (0)
-#define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
- do { \
- unsigned long _tmp; \
- switch ((_dst).bytes) { \
- case 1: \
- ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
- break; \
- default: \
- __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
- _wx, _wy, _lx, _ly, _qx, _qy); \
- break; \
- } \
+#define __emulate_2op(_op, _src, _dst, _eflags, _bx, _by, _wx, _wy, _lx, _ly, _qx, _qy) \
+ do { \
+ unsigned long _tmp; \
+ switch ((_dst).bytes) { \
+ case 1: \
+ ____emulate_2op(_op, _src, _dst, _eflags, _bx, _by, "b"); \
+ break; \
+ default: \
+ __emulate_2op_nobyte(_op, _src, _dst, _eflags, \
+ _wx, _wy, _lx, _ly, _qx, _qy); \
+ break; \
+ } \
} while (0)
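
The __emulate_2op* macros above pick the instruction-size suffix ("b"/"w"/"l"/"q") from the destination operand's width, and ON64() compiles the 8-byte case away on 32-bit hosts. A minimal standalone sketch of the same width-dispatch idea (hypothetical names, plain C in place of the inline-asm helpers):

	#include <stdint.h>

	struct operand { void *ptr; unsigned int bytes; };

	static void add8(uint8_t *d, uint8_t s)    { *d += s; }
	static void add16(uint16_t *d, uint16_t s) { *d += s; }
	static void add32(uint32_t *d, uint32_t s) { *d += s; }
	static void add64(uint64_t *d, uint64_t s) { *d += s; }

	static void emulate_add(struct operand *dst, uint64_t src)
	{
		switch (dst->bytes) {          /* same dispatch as __emulate_2op */
		case 1: add8(dst->ptr, src);   break;
		case 2: add16(dst->ptr, src);  break;
		case 4: add32(dst->ptr, src);  break;
		case 8: add64(dst->ptr, src);  break;  /* wrapped in ON64() upstream */
		}
	}
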
/* Source operand is byte-sized and may be restricted to just %cl. */
@@ -499,17 +498,17 @@ static u32 group2_table[] = {
#define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
do { \
unsigned long _tmp; \
- _type _clv = (_cl).val; \
- _type _srcv = (_src).val; \
+ _type _clv = (_cl).val; \
+ _type _srcv = (_src).val; \
_type _dstv = (_dst).val; \
\
__asm__ __volatile__ ( \
_PRE_EFLAGS("0", "5", "2") \
- _op _suffix " %4,%1 \n" \
+ _op _suffix " %4,%1\n" \
_POST_EFLAGS("0", "5", "2") \
: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
- ); \
+ ); \
\
(_cl).val = (unsigned long) _clv; \
(_src).val = (unsigned long) _srcv; \
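
For readers unfamiliar with the asm block being re-wrapped here: _PRE_EFLAGS loads the guest's EFLAGS before the emulated instruction runs, and _POST_EFLAGS captures the result, filtered through EFLAGS_MASK. A hedged sketch of the underlying pushf/pop trick (hypothetical helper, GCC/Clang on x86-64; fine in the kernel, which builds with -mno-red-zone, but note a user-space push can clobber red-zone data):

	static unsigned long add_and_get_flags(unsigned long *dst, unsigned long src)
	{
		unsigned long flags;

		asm volatile("addq %2, %0\n\t"   /* run the real ALU instruction */
			     "pushfq\n\t"        /* capture the resulting RFLAGS */
			     "popq %1"
			     : "+r" (*dst), "=r" (flags)
			     : "r" (src)
			     : "cc");
		return flags;
	}
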
@@ -520,16 +519,16 @@ static u32 group2_table[] = {
do { \
switch ((_dst).bytes) { \
case 2: \
- __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
- "w", unsigned short); \
+ __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
+ "w", unsigned short); \
break; \
- case 4: \
- __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
- "l", unsigned int); \
+ case 4: \
+ __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
+ "l", unsigned int); \
break; \
case 8: \
ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
- "q", unsigned long)); \
+ "q", unsigned long)); \
break; \
} \
} while (0)
@@ -741,7 +740,7 @@ static int test_cc(unsigned int condition, unsigned int flags)
}
/* Odd condition identifiers (lsb == 1) have inverted sense. */
- return (!!rc ^ (condition & 1));
+ return !!rc ^ (condition & 1);
}
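
Context for the parenthesis removal: x86 encodes condition pairs as adjacent codes (JE = 0x4, JNE = 0x5, and so on), so test_cc() evaluates the even member of the pair and lets the low bit flip the sense, which is exactly the `!!rc ^ (condition & 1)` expression. A condensed sketch of the pattern (only the ZF pair shown; a reduction of the real function, not a copy of it):

	static int test_cc_sketch(unsigned int condition, unsigned int flags)
	{
		int rc = 0;

		switch ((condition & 15) >> 1) {   /* test the even member of the pair */
		case 2:                            /* 0x4 = e/z, 0x5 = ne/nz */
			rc = flags & (1u << 6);    /* ZF is bit 6 of EFLAGS */
			break;
		/* ... other condition pairs elided ... */
		}
		return !!rc ^ (condition & 1);     /* odd code inverts the result */
	}
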
static void decode_register_operand(struct operand *op,
@@ -1194,15 +1193,15 @@ done_prefixes:
c->dst.bytes = c->op_bytes;
c->dst.ptr = &c->regs[VCPU_REGS_RAX];
switch (c->op_bytes) {
- case 1:
- c->dst.val = *(u8 *)c->dst.ptr;
- break;
- case 2:
- c->dst.val = *(u16 *)c->dst.ptr;
- break;
- case 4:
- c->dst.val = *(u32 *)c->dst.ptr;
- break;
+ case 1:
+ c->dst.val = *(u8 *)c->dst.ptr;
+ break;
+ case 2:
+ c->dst.val = *(u16 *)c->dst.ptr;
+ break;
+ case 4:
+ c->dst.val = *(u32 *)c->dst.ptr;
+ break;
}
c->dst.orig_val = c->dst.val;
break;
@@ -1260,7 +1259,7 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
change_mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_OF
| EFLG_TF | EFLG_DF | EFLG_NT | EFLG_RF | EFLG_AC | EFLG_ID;
- switch(ctxt->mode) {
+ switch (ctxt->mode) {
case X86EMUL_MODE_PROT64:
case X86EMUL_MODE_PROT32:
case X86EMUL_MODE_PROT16:
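
The change_mask built above limits which EFLAGS bits a guest popf may alter; the mode cases then extend it with IOPL and IF according to privilege. The masked update itself amounts to the usual read-modify-write, sketched here with assumed names rather than the exact kernel statement:

	static unsigned long apply_popf(unsigned long old_eflags,
					unsigned long popped,
					unsigned long change_mask)
	{
		/* keep protected bits, take the rest from the popped value */
		return (old_eflags & ~change_mask) | (popped & change_mask);
	}
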
@@ -1853,8 +1852,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
* and REPNE. Test if the repeat string operation prefix is
* REPE/REPZ or REPNE/REPNZ and if it's the case it tests the
* corresponding termination condition according to:
- * - if REPE/REPZ and ZF = 0 then done
- * - if REPNE/REPNZ and ZF = 1 then done
+ * - if REPE/REPZ and ZF = 0 then done
+ * - if REPNE/REPNZ and ZF = 1 then done
*/
if ((c->b == 0xa6) || (c->b == 0xa7) ||
(c->b == 0xae) || (c->b == 0xaf)) {
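
As the re-indented comment says, only cmps (0xa6/0xa7) and scas (0xae/0xaf) consult ZF for termination. A condensed sketch of that test (hypothetical helper; 0xf2 and 0xf3 are the REPNE and REPE prefix bytes):

	static int rep_string_done(unsigned char rep_prefix, unsigned long eflags)
	{
		int zf = !!(eflags & (1u << 6));   /* ZF is bit 6 */

		if (rep_prefix == 0xf2)            /* REPNE/REPNZ: done when ZF = 1 */
			return zf;
		return !zf;                        /* REPE/REPZ: done when ZF = 0 */
	}
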
@@ -1918,7 +1917,7 @@ special_insn:
switch (c->b) {
case 0x00 ... 0x05:
- add: /* add */
+ add: /* add */
emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
break;
case 0x06: /* push es */
@@ -1930,14 +1929,14 @@ special_insn:
goto done;
break;
case 0x08 ... 0x0d:
- or: /* or */
+ or: /* or */
emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
break;
case 0x0e: /* push cs */
emulate_push_sreg(ctxt, VCPU_SREG_CS);
break;
case 0x10 ... 0x15:
- adc: /* adc */
+ adc: /* adc */
emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
break;
case 0x16: /* push ss */
@@ -1949,7 +1948,7 @@ special_insn:
goto done;
break;
case 0x18 ... 0x1d:
- sbb: /* sbb */
+ sbb: /* sbb */
emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
break;
case 0x1e: /* push ds */
@@ -1961,19 +1960,19 @@ special_insn:
goto done;
break;
case 0x20 ... 0x25:
- and: /* and */
+ and: /* and */
emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags);
break;
case 0x28 ... 0x2d:
- sub: /* sub */
+ sub: /* sub */
emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags);
break;
case 0x30 ... 0x35:
- xor: /* xor */
+ xor: /* xor */
emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags);
break;
case 0x38 ... 0x3d:
- cmp: /* cmp */
+ cmp: /* cmp */
emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
break;
case 0x40 ... 0x47: /* inc r16/r32 */
@@ -2224,11 +2223,11 @@ special_insn:
emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags);
register_address_increment(c, &c->regs[VCPU_REGS_RSI],
- (ctxt->eflags & EFLG_DF) ? -c->src.bytes
- : c->src.bytes);
+ (ctxt->eflags & EFLG_DF) ? -c->src.bytes
+ : c->src.bytes);
register_address_increment(c, &c->regs[VCPU_REGS_RDI],
- (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
- : c->dst.bytes);
+ (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
+ : c->dst.bytes);
break;
case 0xaa ... 0xab: /* stos */
@@ -2288,8 +2287,8 @@ special_insn:
c->src.val = c->regs[VCPU_REGS_RCX];
emulate_grp2(ctxt);
break;
- case 0xe4: /* inb */
- case 0xe5: /* in */
+ case 0xe4: /* inb */
+ case 0xe5: /* in */
port = c->src.val;
io_dir_in = 1;
goto do_io;
@@ -2315,7 +2314,7 @@ special_insn:
c->eip = c->src.val;
break;
case 0xeb:
- jmp: /* jmp rel short */
+ jmp: /* jmp rel short */
jmp_rel(c, c->src.val);
c->dst.type = OP_NONE; /* Disable writeback. */
break;
@@ -2475,7 +2474,7 @@ twobyte_insn:
goto cannot_emulate;
}
break;
- case 0x05: /* syscall */
+ case 0x05: /* syscall */
rc = emulate_syscall(ctxt);
if (rc != X86EMUL_CONTINUE)
goto done;
@@ -2581,7 +2580,7 @@ twobyte_insn:
goto done;
break;
case 0xa3:
- bt: /* bt */
+ bt: /* bt */
c->dst.type = OP_NONE;
/* only subword offset */
c->src.val &= (c->dst.bytes << 3) - 1;
@@ -2600,7 +2599,7 @@ twobyte_insn:
goto done;
break;
case 0xab:
- bts: /* bts */
+ bts: /* bts */
/* only subword offset */
c->src.val &= (c->dst.bytes << 3) - 1;
emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags);
@@ -2629,7 +2628,7 @@ twobyte_insn:
}
break;
case 0xb3:
- btr: /* btr */
+ btr: /* btr */
/* only subword offset */
c->src.val &= (c->dst.bytes << 3) - 1;
emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
@@ -2652,7 +2651,7 @@ twobyte_insn:
}
break;
case 0xbb:
- btc: /* btc */
+ btc: /* btc */
/* only subword offset */
c->src.val &= (c->dst.bytes << 3) - 1;
emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags);
@@ -332,17 +332,15 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
switch (ps->channels[0].mode) {
case 0:
case 1:
- /* FIXME: enhance mode 4 precision */
+ /* FIXME: enhance mode 4 precision */
case 4:
- if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)) {
+ if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY))
create_pit_timer(ps, val, 0);
- }
break;
case 2:
case 3:
- if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)){
+ if (!(ps->flags & KVM_PIT_FLAGS_HPET_LEGACY))
create_pit_timer(ps, val, 1);
- }
break;
default:
destroy_pit_timer(&ps->pit_timer);
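
The switch above encodes the PIT programming model: modes 2 (rate generator) and 3 (square wave) reload the count automatically and so get a periodic timer (third argument 1), while modes 0, 1 and 4 fire once. Reduced to a predicate (hypothetical name):

	static int pit_mode_is_periodic(int mode)
	{
		return mode == 2 || mode == 3;   /* rate generator / square wave */
	}
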
@@ -42,12 +42,12 @@ struct kvm_pit {
struct kvm_irq_mask_notifier mask_notifier;
};
-#define KVM_PIT_BASE_ADDRESS 0x40
-#define KVM_SPEAKER_BASE_ADDRESS 0x61
-#define KVM_PIT_MEM_LENGTH 4
-#define KVM_PIT_FREQ 1193181
-#define KVM_MAX_PIT_INTR_INTERVAL HZ / 100
-#define KVM_PIT_CHANNEL_MASK 0x3
+#define KVM_PIT_BASE_ADDRESS 0x40
+#define KVM_SPEAKER_BASE_ADDRESS 0x61
+#define KVM_PIT_MEM_LENGTH 4
+#define KVM_PIT_FREQ 1193181
+#define KVM_MAX_PIT_INTR_INTERVAL (HZ / 100)
+#define KVM_PIT_CHANNEL_MASK 0x3
void kvm_inject_pit_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start);
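
The parentheses added to KVM_MAX_PIT_INTR_INTERVAL are more than alignment: macro expansion is textual, so an unparenthesized `HZ / 100` can bind into the surrounding expression. A contrived but compilable illustration:

	#define HZ 250                       /* stand-in value for illustration */
	#define BAD_INTERVAL   HZ / 100
	#define GOOD_INTERVAL  (HZ / 100)

	/* 1000 / BAD_INTERVAL expands to 1000 / 250 / 100, which is 0,
	 * while 1000 / GOOD_INTERVAL is 1000 / 2, which is 500. */
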
@@ -259,9 +259,8 @@ void kvm_pic_reset(struct kvm_kpic_state *s)
for (irq = 0; irq < PIC_NUM_PINS/2; irq++) {
if (vcpu0 && kvm_apic_accept_pic_intr(vcpu0))
- if (irr & (1 << irq) || isr & (1 << irq)) {
+ if (irr & (1 << irq) || isr & (1 << irq))
pic_clear_isr(s, irq);
- }
}
}
@@ -1,7 +1,7 @@
struct kvm_timer {
struct hrtimer timer;
- s64 period; /* unit: ns */
+ s64 period; /* unit: ns */
atomic_t pending; /* accumulated triggered timers */
bool reinject;
struct kvm_timer_ops *t_ops;
@@ -10,9 +10,7 @@ struct kvm_timer {
};
struct kvm_timer_ops {
- bool (*is_periodic)(struct kvm_timer *);
+ bool (*is_periodic)(struct kvm_timer *);
};
-
enum hrtimer_restart kvm_timer_fn(struct hrtimer *data);
-
@@ -810,9 +810,9 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
break;
case APIC_SELF_IPI:
- if (apic_x2apic_mode(apic)) {
+ if (apic_x2apic_mode(apic))
apic_reg_write(apic, APIC_ICR, 0x40000 | (val & 0xff));
- } else
+ else
ret = 1;
break;
default:
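
For context on the branch losing its braces: an x2APIC SELF_IPI write is folded into an ICR write whose destination-shorthand bits select "self". The 0x40000 constant is that shorthand (bit 18 set); a reading aid with assumed macro names, mirroring what the expression encodes:

	#define DEST_SELF_SHORTHAND  0x40000   /* ICR bits 19:18 = 01b ("self") */
	#define SELF_IPI_TO_ICR(val) (DEST_SELF_SHORTHAND | ((val) & 0xff))
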
@@ -845,7 +845,7 @@ static int apic_mmio_write(struct kvm_io_device *this,
return 0;
}
- val = *(u32*)data;
+ val = *(u32 *)data;
/* too common printing */
if (offset != APIC_EOI)
@@ -766,7 +766,7 @@ static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
__set_spte(spte, shadow_trap_nonpresent_pte);
spte = rmap_next(kvm, rmapp, NULL);
} else {
- new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
+			new_spte = *spte & ~(PT64_BASE_ADDR_MASK);
new_spte |= (u64)new_pfn << PAGE_SHIFT;
new_spte &= ~PT_WRITABLE_MASK;
@@ -1118,7 +1118,7 @@ static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
int i;
if (sp->unsync)
- for (i=0; i < pvec->nr; i++)
+ for (i = 0; i < pvec->nr; i++)
if (pvec->page[i].sp == sp)
return 0;
@@ -1234,7 +1234,7 @@ struct mmu_page_path {
#define for_each_sp(pvec, sp, parents, i) \
for (i = mmu_pages_next(&pvec, &parents, -1), \
sp = pvec.page[i].sp; \
- i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
+ i < pvec.nr && ({ sp = pvec.page[i].sp; 1; }); \
i = mmu_pages_next(&pvec, &parents, i))
static int mmu_pages_next(struct kvm_mmu_pages *pvec,
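
The `({ sp = pvec.page[i].sp; 1; })` being reformatted is a GNU C statement expression: it executes its statements and yields the value of the last one, which lets the for condition refresh sp on every iteration while still evaluating true. A standalone illustration (GCC/Clang extension):

	static int statement_expr_demo(void)
	{
		int x = 0;
		int y = ({ x = 5; x + 1; });   /* y == 6, and x was updated */

		return y;
	}
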
@@ -1550,8 +1550,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
used_pages--;
}
kvm->arch.n_free_mmu_pages = 0;
- }
- else
+ } else
kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
- kvm->arch.n_alloc_mmu_pages;
@@ -1906,8 +1905,8 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
pgprintk("%s: setting spte %llx\n", __func__, *sptep);
pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
- is_large_pte(*sptep)? "2MB" : "4kB",
- *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
+ is_large_pte(*sptep) ? "2MB" : "4kB",
+ *sptep & PT_PRESENT_MASK ? "RW" : "R", gfn,
*sptep, sptep);
if (!was_rmapped && is_large_pte(*sptep))
++vcpu->kvm->stat.lpages;
@@ -2310,7 +2309,7 @@ static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 62); /* PDE */
context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
- rsvd_bits(maxphyaddr, 62); /* PTE */
+ rsvd_bits(maxphyaddr, 62); /* PTE */
context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
rsvd_bits(maxphyaddr, 62) |
rsvd_bits(13, 20); /* large page */
@@ -2520,7 +2519,7 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
++vcpu->kvm->stat.mmu_pde_zapped;
return;
- }
+ }
++vcpu->kvm->stat.mmu_pte_updated;
if (sp->role.glevels == PT32_ROOT_LEVEL)
@@ -26,8 +26,8 @@
static const char *access_str[] = { \
"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \
}; \
- union kvm_mmu_page_role role; \
- \
+ union kvm_mmu_page_role role; \
+ \
role.word = __entry->role; \
\
trace_seq_printf(p, "sp gfn %llx %u/%u q%u%s %s%s %spge" \
@@ -67,7 +67,7 @@ TRACE_EVENT(
TP_fast_assign(
__entry->addr = addr;
__entry->pferr = (!!write_fault << 1) | (!!user_fault << 2)
- | (!!fetch_fault << 4);
+ | (!!fetch_fault << 4);
),
TP_printk("addr %llx pferr %x %s", __entry->addr, __entry->pferr,
@@ -542,16 +542,16 @@ static void init_vmcb(struct vcpu_svm *svm)
svm->vcpu.fpu_active = 1;
- control->intercept_cr_read = INTERCEPT_CR0_MASK |
+ control->intercept_cr_read = INTERCEPT_CR0_MASK |
INTERCEPT_CR3_MASK |
INTERCEPT_CR4_MASK;
- control->intercept_cr_write = INTERCEPT_CR0_MASK |
+ control->intercept_cr_write = INTERCEPT_CR0_MASK |
INTERCEPT_CR3_MASK |
INTERCEPT_CR4_MASK |
INTERCEPT_CR8_MASK;
- control->intercept_dr_read = INTERCEPT_DR0_MASK |
+ control->intercept_dr_read = INTERCEPT_DR0_MASK |
INTERCEPT_DR1_MASK |
INTERCEPT_DR2_MASK |
INTERCEPT_DR3_MASK |
@@ -560,7 +560,7 @@ static void init_vmcb(struct vcpu_svm *svm)
INTERCEPT_DR6_MASK |
INTERCEPT_DR7_MASK;
- control->intercept_dr_write = INTERCEPT_DR0_MASK |
+ control->intercept_dr_write = INTERCEPT_DR0_MASK |
INTERCEPT_DR1_MASK |
INTERCEPT_DR2_MASK |
INTERCEPT_DR3_MASK |
@@ -574,7 +574,7 @@ static void init_vmcb(struct vcpu_svm *svm)
(1 << MC_VECTOR);
- control->intercept = (1ULL << INTERCEPT_INTR) |
+ control->intercept = (1ULL << INTERCEPT_INTR) |
(1ULL << INTERCEPT_NMI) |
(1ULL << INTERCEPT_SMI) |
(1ULL << INTERCEPT_SELECTIVE_CR0) |
@@ -1228,7 +1228,7 @@ static int db_interception(struct vcpu_svm *svm)
}
if (svm->vcpu.guest_debug &
- (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)){
+ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
kvm_run->exit_reason = KVM_EXIT_DEBUG;
kvm_run->debug.arch.pc =
svm->vmcb->save.cs.base + svm->vmcb->save.rip;
@@ -1565,9 +1565,8 @@ static int nested_svm_exit_handled(struct vcpu_svm *svm)
}
}
- if (vmexit == NESTED_EXIT_DONE) {
+ if (vmexit == NESTED_EXIT_DONE)
nested_svm_vmexit(svm);
- }
return vmexit;
}
@@ -1723,7 +1722,7 @@ static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
if (!nested_msrpm)
return false;
- for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
+ for (i = 0; i < PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
svm->nested.msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
svm->vmcb->control.msrpm_base_pa = __pa(svm->nested.msrpm);
@@ -2320,15 +2319,15 @@ static int pause_interception(struct vcpu_svm *svm)
}
static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
- [SVM_EXIT_READ_CR0] = emulate_on_interception,
- [SVM_EXIT_READ_CR3] = emulate_on_interception,
- [SVM_EXIT_READ_CR4] = emulate_on_interception,
- [SVM_EXIT_READ_CR8] = emulate_on_interception,
+ [SVM_EXIT_READ_CR0] = emulate_on_interception,
+ [SVM_EXIT_READ_CR3] = emulate_on_interception,
+ [SVM_EXIT_READ_CR4] = emulate_on_interception,
+ [SVM_EXIT_READ_CR8] = emulate_on_interception,
[SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
- [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
+ [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
+ [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
[SVM_EXIT_READ_DR0] = emulate_on_interception,
[SVM_EXIT_READ_DR1] = emulate_on_interception,
[SVM_EXIT_READ_DR2] = emulate_on_interception,
@@ -2358,13 +2357,13 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_VINTR] = interrupt_window_interception,
/* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
[SVM_EXIT_CPUID] = cpuid_interception,
- [SVM_EXIT_IRET] = iret_interception,
- [SVM_EXIT_INVD] = emulate_on_interception,
+ [SVM_EXIT_IRET] = iret_interception,
+ [SVM_EXIT_INVD] = emulate_on_interception,
[SVM_EXIT_PAUSE] = pause_interception,
[SVM_EXIT_HLT] = halt_interception,
[SVM_EXIT_INVLPG] = invlpg_interception,
[SVM_EXIT_INVLPGA] = invlpga_interception,
- [SVM_EXIT_IOIO] = io_interception,
+ [SVM_EXIT_IOIO] = io_interception,
[SVM_EXIT_MSR] = msr_interception,
[SVM_EXIT_TASK_SWITCH] = task_switch_interception,
[SVM_EXIT_SHUTDOWN] = shutdown_interception,
@@ -2375,7 +2374,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
[SVM_EXIT_STGI] = stgi_interception,
[SVM_EXIT_CLGI] = clgi_interception,
[SVM_EXIT_SKINIT] = skinit_interception,
- [SVM_EXIT_WBINVD] = emulate_on_interception,
+ [SVM_EXIT_WBINVD] = emulate_on_interception,
[SVM_EXIT_MONITOR] = invalid_op_interception,
[SVM_EXIT_MWAIT] = invalid_op_interception,
[SVM_EXIT_NPF] = pf_interception,
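
The table being realigned is the standard sparse-dispatch idiom: C99 designated initializers key the array by exit code, every unlisted slot stays NULL, and the caller can reject unknown codes before the indirect call. A self-contained sketch with made-up codes and handlers:

	typedef int (*exit_handler_t)(void *ctx);

	static int on_hlt(void *ctx) { return 0; }
	static int on_io(void *ctx)  { return 1; }

	static exit_handler_t handlers[64] = {
		[0x12] = on_hlt,     /* hypothetical exit codes, for illustration */
		[0x1b] = on_io,
	};

	static int dispatch(unsigned int code, void *ctx)
	{
		if (code >= 64 || !handlers[code])
			return -1;   /* unknown exit: bail before the call */
		return handlers[code](ctx);
	}
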
@@ -2874,24 +2873,24 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
}
static const struct trace_print_flags svm_exit_reasons_str[] = {
- { SVM_EXIT_READ_CR0, "read_cr0" },
- { SVM_EXIT_READ_CR3, "read_cr3" },
- { SVM_EXIT_READ_CR4, "read_cr4" },
- { SVM_EXIT_READ_CR8, "read_cr8" },
- { SVM_EXIT_WRITE_CR0, "write_cr0" },
- { SVM_EXIT_WRITE_CR3, "write_cr3" },
- { SVM_EXIT_WRITE_CR4, "write_cr4" },
- { SVM_EXIT_WRITE_CR8, "write_cr8" },
- { SVM_EXIT_READ_DR0, "read_dr0" },
- { SVM_EXIT_READ_DR1, "read_dr1" },
- { SVM_EXIT_READ_DR2, "read_dr2" },
- { SVM_EXIT_READ_DR3, "read_dr3" },
- { SVM_EXIT_WRITE_DR0, "write_dr0" },
- { SVM_EXIT_WRITE_DR1, "write_dr1" },
- { SVM_EXIT_WRITE_DR2, "write_dr2" },
- { SVM_EXIT_WRITE_DR3, "write_dr3" },
- { SVM_EXIT_WRITE_DR5, "write_dr5" },
- { SVM_EXIT_WRITE_DR7, "write_dr7" },
+ { SVM_EXIT_READ_CR0, "read_cr0" },
+ { SVM_EXIT_READ_CR3, "read_cr3" },
+ { SVM_EXIT_READ_CR4, "read_cr4" },
+ { SVM_EXIT_READ_CR8, "read_cr8" },
+ { SVM_EXIT_WRITE_CR0, "write_cr0" },
+ { SVM_EXIT_WRITE_CR3, "write_cr3" },
+ { SVM_EXIT_WRITE_CR4, "write_cr4" },
+ { SVM_EXIT_WRITE_CR8, "write_cr8" },
+ { SVM_EXIT_READ_DR0, "read_dr0" },
+ { SVM_EXIT_READ_DR1, "read_dr1" },
+ { SVM_EXIT_READ_DR2, "read_dr2" },
+ { SVM_EXIT_READ_DR3, "read_dr3" },
+ { SVM_EXIT_WRITE_DR0, "write_dr0" },
+ { SVM_EXIT_WRITE_DR1, "write_dr1" },
+ { SVM_EXIT_WRITE_DR2, "write_dr2" },
+ { SVM_EXIT_WRITE_DR3, "write_dr3" },
+ { SVM_EXIT_WRITE_DR5, "write_dr5" },
+ { SVM_EXIT_WRITE_DR7, "write_dr7" },
{ SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
{ SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
{ SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
@@ -35,7 +35,7 @@ TRACE_EVENT(kvm_hypercall,
TP_ARGS(nr, a0, a1, a2, a3),
TP_STRUCT__entry(
- __field( unsigned long, nr )
+ __field( unsigned long, nr )
__field( unsigned long, a0 )
__field( unsigned long, a1 )
__field( unsigned long, a2 )
@@ -96,9 +96,9 @@ TRACE_EVENT(kvm_pio,
TP_ARGS(rw, port, size, count),
TP_STRUCT__entry(
- __field( unsigned int, rw )
- __field( unsigned int, port )
- __field( unsigned int, size )
+ __field( unsigned int, rw )
+ __field( unsigned int, port )
+ __field( unsigned int, size )
__field( unsigned int, count )
),
@@ -543,8 +543,8 @@ TRACE_EVENT(kvm_skinit,
),
TP_fast_assign(
- __entry->rip = rip;
- __entry->slb = slb;
+ __entry->rip = rip;
+ __entry->slb = slb;
),
TP_printk("rip: 0x%016llx slb: 0x%08x\n",
@@ -108,25 +108,25 @@ struct shared_msr_entry {
};
struct vcpu_vmx {
- struct kvm_vcpu vcpu;
- struct list_head local_vcpus_link;
- unsigned long host_rsp;
- int launched;
- u8 fail;
- u32 idt_vectoring_info;
- struct shared_msr_entry *guest_msrs;
- int nmsrs;
- int save_nmsrs;
+ struct kvm_vcpu vcpu;
+ struct list_head local_vcpus_link;
+ unsigned long host_rsp;
+ int launched;
+ u8 fail;
+ u32 idt_vectoring_info;
+ struct shared_msr_entry *guest_msrs;
+ int nmsrs;
+ int save_nmsrs;
#ifdef CONFIG_X86_64
- u64 msr_host_kernel_gs_base;
- u64 msr_guest_kernel_gs_base;
+ u64 msr_host_kernel_gs_base;
+ u64 msr_guest_kernel_gs_base;
#endif
- struct vmcs *vmcs;
+ struct vmcs *vmcs;
struct {
- int loaded;
- u16 fs_sel, gs_sel, ldt_sel;
- int gs_ldt_reload_needed;
- int fs_reload_needed;
+ int loaded;
+ u16 fs_sel, gs_sel, ldt_sel;
+ int gs_ldt_reload_needed;
+ int fs_reload_needed;
} host_state;
struct {
int vm86_active;
@@ -143,8 +143,8 @@ struct vcpu_vmx {
unsigned rip;
} irq;
} rmode;
- int vpid;
- bool emulation_required;
+ int vpid;
+ bool emulation_required;
/* Support for vnmi-less CPUs */
int soft_vnmi_blocked;
@@ -194,9 +194,9 @@ static struct vmx_capability {
#define VMX_SEGMENT_FIELD(seg) \
[VCPU_SREG_##seg] = { \
.selector = GUEST_##seg##_SELECTOR, \
- .base = GUEST_##seg##_BASE, \
- .limit = GUEST_##seg##_LIMIT, \
- .ar_bytes = GUEST_##seg##_AR_BYTES, \
+ .base = GUEST_##seg##_BASE, \
+ .limit = GUEST_##seg##_LIMIT, \
+ .ar_bytes = GUEST_##seg##_AR_BYTES, \
}
static struct kvm_vmx_segment_field {
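
The VMX_SEGMENT_FIELD macro above splices its argument into both the array designator and every field enumerator via ## token pasting. The same mechanism in miniature, with made-up names:

	enum { REG_AX_ID, REG_BX_ID, NR_REGS };
	#define REG_AX_NAME "ax"
	#define REG_BX_NAME "bx"

	#define REG_FIELD(r) [REG_##r##_ID] = { .name = REG_##r##_NAME }

	static struct { const char *name; } reg_table[NR_REGS] = {
		REG_FIELD(AX),   /* expands to [REG_AX_ID] = { .name = "ax" } */
		REG_FIELD(BX),
	};
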
@@ -1625,7 +1625,7 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
guest_tr_ar = vmcs_read32(GUEST_TR_AR_BYTES);
if ((guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
- printk(KERN_DEBUG "%s: tss fixup for long mode. \n",
+ printk(KERN_DEBUG "%s: tss fixup for long mode.\n",
__func__);
vmcs_write32(GUEST_TR_AR_BYTES,
(guest_tr_ar & ~AR_TYPE_MASK)
@@ -3742,7 +3742,7 @@ static int kvmclock_cpufreq_notifier(struct notifier_block *nb, unsigned long va
}
static struct notifier_block kvmclock_cpufreq_notifier_block = {
- .notifier_call = kvmclock_cpufreq_notifier
+ .notifier_call = kvmclock_cpufreq_notifier
};
static void kvm_timer_init(void)
@@ -4146,9 +4146,9 @@ EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
*/
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
{
- return (!irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
+ return !irqchip_in_kernel(vcpu->kvm) && !kvm_cpu_has_interrupt(vcpu) &&
vcpu->run->request_interrupt_window &&
- kvm_arch_interrupt_allowed(vcpu));
+ kvm_arch_interrupt_allowed(vcpu);
}
static void post_kvm_run_save(struct kvm_vcpu *vcpu)
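
This hunk applies checkpatch's "return is not a function" rule: the value of a return statement needs no surrounding parentheses. In miniature:

	static int is_positive(int a)
	{
		return a > 0;        /* preferred; "return (a > 0);" is flagged */
	}
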
@@ -4412,9 +4412,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
kvm_vcpu_block(vcpu);
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests))
- {
- switch(vcpu->arch.mp_state) {
+ if (test_and_clear_bit(KVM_REQ_UNHALT, &vcpu->requests)) {
+ switch (vcpu->arch.mp_state) {
case KVM_MP_STATE_HALTED:
vcpu->arch.mp_state =
KVM_MP_STATE_RUNNABLE;
@@ -4703,8 +4702,7 @@ static void get_segment_descriptor_dtable(struct kvm_vcpu *vcpu,
else
dtable->limit = kvm_seg.limit;
dtable->base = kvm_seg.base;
- }
- else
+ } else
kvm_x86_ops->get_gdt(vcpu, dtable);
}
@@ -4871,7 +4869,7 @@ int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector, int seg)
}
/* CS(RPL) <- CPL */
selector = (selector & 0xfffc) | cpl;
- break;
+ break;
case VCPU_SREG_TR:
if (kvm_seg.s || (kvm_seg.type != 1 && kvm_seg.type != 9))
goto exception;
@@ -5173,7 +5171,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
}
if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
- cseg_desc.type &= ~(1 << 1); //clear the B flag
+ cseg_desc.type &= ~(1 << 1); /* clear the B flag */
save_guest_segment_descriptor(vcpu, old_tss_sel, &cseg_desc);
}
@@ -34,7 +34,7 @@ static inline bool kvm_exception_is_soft(unsigned int nr)
}
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
- u32 function, u32 index);
+ u32 function, u32 index);
static inline bool is_protmode(struct kvm_vcpu *vcpu)
{