@@ -132,6 +132,12 @@
0x00082020 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) INTR_WINDOW [ value = 0x%(1)08x ]
0x00082021 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) NPF [ gpa = 0x%(2)08x%(1)08x mfn = 0x%(4)08x%(3)08x qual = 0x%(5)04x p2mt = 0x%(6)04x ]
0x00082023 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) TRAP [ vector = 0x%(1)02x ]
+0x00082024 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) TRAP_DEBUG [ exit_qualification = 0x%(1)08x ]
+0x00082025 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) VLAPIC
+0x00082028 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) VMPORT_HANDLED [ cmd = %(1)d eax = 0x%(2)08x ebx = 0x%(3)08x ecx = 0x%(4)08x edx = 0x%(5)08x esi = 0x%(6)08x edi = 0x%(7)08x ]
+0x00082029 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) VMPORT_IGNORED [ port = %(1)d eax = 0x%(2)08x ]
+0x0008202a CPU%(cpu)d %(tsc)d (+%(reltsc)8d) VMPORT_QEMU [ eax = 0x%(1)08x ebx = 0x%(2)08x ecx = 0x%(3)08x edx = 0x%(4)08x esi = 0x%(5)08x edi = 0x%(6)08x ]
+0x0008202b CPU%(cpu)d %(tsc)d (+%(reltsc)8d) VMPORT_SEND [ cmd = 0x%(1)08x ebx = 0x%(2)08x ecx = 0x%(3)08x edx = 0x%(4)08x esi = 0x%(5)08x edi = 0x%(6)08x ]
0x0010f001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) page_grant_map [ domid = %(1)d ]
0x0010f002 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) page_grant_unmap [ domid = %(1)d ]
@@ -262,6 +262,9 @@ bool handle_hvm_io_completion(struct vcpu *v)
regs->edx = vr->edx;
regs->esi = vr->esi;
regs->edi = vr->edi;
+ HVMTRACE_ND(VMPORT_QEMU, 0, 1/*cycles*/, 6,
+ vio->io_req.data, regs->ebx, regs->ecx,
+ regs->edx, regs->esi, regs->edi, 0);
}
}
return handle_pio(vio->io_req.addr, vio->io_req.size,
@@ -1052,7 +1052,7 @@ void svm_vmenter_helper(const struct cpu_user_regs *regs)
if ( unlikely(tb_init_done) )
HVMTRACE_ND(VMENTRY,
nestedhvm_vcpu_in_guestmode(curr) ? TRC_HVM_NESTEDFLAG : 0,
-                    1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
+                    1/*cycles*/, 0, 0, 0, 0, 0, 0, 0, 0);
svm_sync_vmcb(curr, vmcb_needs_vmsave);
@@ -2531,11 +2531,11 @@ void svm_vmexit_handler(struct cpu_user_regs *regs)
if ( hvm_long_mode_active(v) )
HVMTRACE_ND(VMEXIT64, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
1/*cycles*/, 3, exit_reason,
- regs->eip, regs->rip >> 32, 0, 0, 0);
+ regs->eip, regs->rip >> 32, 0, 0, 0, 0);
else
HVMTRACE_ND(VMEXIT, vcpu_guestmode ? TRC_HVM_NESTEDFLAG : 0,
1/*cycles*/, 2, exit_reason,
- regs->eip, 0, 0, 0, 0);
+ regs->eip, 0, 0, 0, 0, 0);
if ( vcpu_guestmode )
{
@@ -17,6 +17,7 @@
#include <asm/mc146818rtc.h>
#include <asm/hvm/hvm.h>
#include <asm/hvm/support.h>
+#include <asm/hvm/trace.h>
#include "backdoor_def.h"
@@ -62,6 +63,7 @@ static int vmport_ioport(int dir, uint32_t port, uint32_t bytes, uint32_t *val)
if ( port == BDOOR_PORT && regs->eax == BDOOR_MAGIC )
{
uint32_t new_eax = ~0u;
+ uint16_t cmd = regs->ecx;
uint64_t value;
struct vcpu *curr = current;
struct domain *currd = curr->domain;
@@ -72,7 +74,7 @@ static int vmport_ioport(int dir, uint32_t port, uint32_t bytes, uint32_t *val)
* leaving the high 32-bits unchanged, unlike what one would
* expect to happen.
*/
- switch ( regs->ecx & 0xffff )
+ switch ( cmd )
{
case BDOOR_CMD_GETMHZ:
new_eax = currd->arch.tsc_khz / 1000;
@@ -147,14 +149,22 @@ static int vmport_ioport(int dir, uint32_t port, uint32_t bytes, uint32_t *val)
break;
default:
+ HVMTRACE_6D(VMPORT_SEND, cmd, regs->ebx, regs->ecx,
+ regs->edx, regs->esi, regs->edi);
/* Let backing DM handle */
return X86EMUL_UNHANDLEABLE;
}
+ HVMTRACE_7D(VMPORT_HANDLED, cmd, new_eax, regs->ebx, regs->ecx,
+ regs->edx, regs->esi, regs->edi);
if ( dir == IOREQ_READ )
*val = new_eax;
}
- else if ( dir == IOREQ_READ )
- *val = ~0u;
+ else
+ {
+ HVMTRACE_2D(VMPORT_IGNORED, port, regs->eax);
+ if ( dir == IOREQ_READ )
+ *val = ~0u;
+ }
return X86EMUL_OKAY;
}
@@ -3683,10 +3683,10 @@ void vmx_vmexit_handler(struct cpu_user_regs *regs)
if ( hvm_long_mode_active(v) )
HVMTRACE_ND(VMEXIT64, 0, 1/*cycles*/, 3, exit_reason,
- regs->eip, regs->rip >> 32, 0, 0, 0);
+ regs->eip, regs->rip >> 32, 0, 0, 0, 0);
else
HVMTRACE_ND(VMEXIT, 0, 1/*cycles*/, 2, exit_reason,
- regs->eip, 0, 0, 0, 0);
+ regs->eip, 0, 0, 0, 0, 0);
perfc_incra(vmexits, exit_reason);
@@ -4463,7 +4463,7 @@ bool vmx_vmenter_helper(const struct cpu_user_regs *regs)
if ( unlikely(curr->arch.hvm.vmx.lbr_flags & LBR_FIXUP_MASK) )
lbr_fixup();
- HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0);
+ HVMTRACE_ND(VMENTRY, 0, 1/*cycles*/, 0, 0, 0, 0, 0, 0, 0, 0);
__vmwrite(GUEST_RIP, regs->rip);
__vmwrite(GUEST_RSP, regs->rsp);
@@ -56,6 +56,10 @@
#define DO_TRC_HVM_TRAP DEFAULT_HVM_MISC
#define DO_TRC_HVM_TRAP_DEBUG DEFAULT_HVM_MISC
#define DO_TRC_HVM_VLAPIC DEFAULT_HVM_MISC
+#define DO_TRC_HVM_VMPORT_HANDLED DEFAULT_HVM_IO
+#define DO_TRC_HVM_VMPORT_IGNORED DEFAULT_HVM_IO
+#define DO_TRC_HVM_VMPORT_QEMU DEFAULT_HVM_IO
+#define DO_TRC_HVM_VMPORT_SEND DEFAULT_HVM_IO
#define TRC_PAR_LONG(par) ((par)&0xFFFFFFFF),((par)>>32)
@@ -67,38 +71,34 @@
#define TRACE_2_LONG_4D(_e, d1, d2, d3, d4, ...) \
TRACE_6D(_e, d1, d2, d3, d4)
-#define HVMTRACE_ND(evt, modifier, cycles, count, d1, d2, d3, d4, d5, d6) \
- do { \
- if ( unlikely(tb_init_done) && DO_TRC_HVM_ ## evt ) \
- { \
- struct { \
- u32 d[6]; \
- } _d; \
- _d.d[0]=(d1); \
- _d.d[1]=(d2); \
- _d.d[2]=(d3); \
- _d.d[3]=(d4); \
- _d.d[4]=(d5); \
- _d.d[5]=(d6); \
- __trace_var(TRC_HVM_ ## evt | (modifier), cycles, \
- sizeof(*_d.d) * count, &_d); \
- } \
+#define HVMTRACE_ND(evt, modifier, cycles, count, d1, d2, d3, d4, d5, d6, d7) \
+ do { \
+ if ( unlikely(tb_init_done) && DO_TRC_HVM_ ## evt ) \
+ { \
+ struct { \
+ u32 d[7]; \
+        } _d = { { (d1), (d2), (d3), (d4), (d5), (d6), (d7) } };           \
+ __trace_var(TRC_HVM_ ## evt | (modifier), cycles, \
+ sizeof(*_d.d) * count, &_d); \
+ } \
} while(0)
-#define HVMTRACE_6D(evt, d1, d2, d3, d4, d5, d6) \
- HVMTRACE_ND(evt, 0, 0, 6, d1, d2, d3, d4, d5, d6)
-#define HVMTRACE_5D(evt, d1, d2, d3, d4, d5) \
- HVMTRACE_ND(evt, 0, 0, 5, d1, d2, d3, d4, d5, 0)
-#define HVMTRACE_4D(evt, d1, d2, d3, d4) \
- HVMTRACE_ND(evt, 0, 0, 4, d1, d2, d3, d4, 0, 0)
-#define HVMTRACE_3D(evt, d1, d2, d3) \
- HVMTRACE_ND(evt, 0, 0, 3, d1, d2, d3, 0, 0, 0)
-#define HVMTRACE_2D(evt, d1, d2) \
- HVMTRACE_ND(evt, 0, 0, 2, d1, d2, 0, 0, 0, 0)
-#define HVMTRACE_1D(evt, d1) \
- HVMTRACE_ND(evt, 0, 0, 1, d1, 0, 0, 0, 0, 0)
-#define HVMTRACE_0D(evt) \
- HVMTRACE_ND(evt, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+#define HVMTRACE_7D(evt, d1, d2, d3, d4, d5, d6, d7) \
+ HVMTRACE_ND(evt, 0, 0, 7, d1, d2, d3, d4, d5, d6, d7)
+#define HVMTRACE_6D(evt, d1, d2, d3, d4, d5, d6) \
+ HVMTRACE_ND(evt, 0, 0, 6, d1, d2, d3, d4, d5, d6, 0)
+#define HVMTRACE_5D(evt, d1, d2, d3, d4, d5) \
+ HVMTRACE_ND(evt, 0, 0, 5, d1, d2, d3, d4, d5, 0, 0)
+#define HVMTRACE_4D(evt, d1, d2, d3, d4) \
+ HVMTRACE_ND(evt, 0, 0, 4, d1, d2, d3, d4, 0, 0, 0)
+#define HVMTRACE_3D(evt, d1, d2, d3) \
+ HVMTRACE_ND(evt, 0, 0, 3, d1, d2, d3, 0, 0, 0, 0)
+#define HVMTRACE_2D(evt, d1, d2) \
+ HVMTRACE_ND(evt, 0, 0, 2, d1, d2, 0, 0, 0, 0, 0)
+#define HVMTRACE_1D(evt, d1) \
+ HVMTRACE_ND(evt, 0, 0, 1, d1, 0, 0, 0, 0, 0, 0)
+#define HVMTRACE_0D(evt) \
+ HVMTRACE_ND(evt, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
#define HVMTRACE_LONG_1D(evt, d1) \
HVMTRACE_2D(evt ## 64, (d1) & 0xFFFFFFFF, (d1) >> 32)
@@ -237,6 +237,10 @@
#define TRC_HVM_VLAPIC (TRC_HVM_HANDLER + 0x25)
#define TRC_HVM_XCR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x26)
#define TRC_HVM_XCR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x27)
+#define TRC_HVM_VMPORT_HANDLED (TRC_HVM_HANDLER + 0x28)
+#define TRC_HVM_VMPORT_IGNORED (TRC_HVM_HANDLER + 0x29)
+#define TRC_HVM_VMPORT_QEMU (TRC_HVM_HANDLER + 0x2a)
+#define TRC_HVM_VMPORT_SEND (TRC_HVM_HANDLER + 0x2b)
#define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216)
#define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217)