@@ -125,6 +125,7 @@
0x00082020 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) INTR_WINDOW [ value = 0x%(1)08x ]
0x00082021 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) NPF [ gpa = 0x%(2)08x%(1)08x mfn = 0x%(4)08x%(3)08x qual = 0x%(5)04x p2mt = 0x%(6)04x ]
0x00082023 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) TRAP [ vector = 0x%(1)02x ]
+0x00082026 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) PI_BLOCK_LIST [ domid = 0x%(1)04x, vcpu = 0x%(2)04x, pcpu = 0x%(3)04x, #entry = 0x%(4)04x ]
0x0010f001 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) page_grant_map [ domid = %(1)d ]
0x0010f002 CPU%(cpu)d %(tsc)d (+%(reltsc)8d) page_grant_unmap [ domid = %(1)d ]
@@ -82,6 +82,7 @@ static int vmx_vmfunc_intercept(struct cpu_user_regs *regs);
struct vmx_pi_blocking_vcpu {
struct list_head list;
spinlock_t lock;
+    atomic_t counter; /* number of vCPUs on this pCPU's blocking list */
};
/*
@@ -119,6 +120,9 @@ static void vmx_vcpu_block(struct vcpu *v)
*/
ASSERT(old_lock == NULL);
+ atomic_inc(&per_cpu(vmx_pi_blocking, v->processor).counter);
+ HVMTRACE_4D(VT_D_PI_BLOCK, v->domain->domain_id, v->vcpu_id, v->processor,
+ atomic_read(&per_cpu(vmx_pi_blocking, v->processor).counter));
list_add_tail(&v->arch.hvm_vmx.pi_blocking.list,
&per_cpu(vmx_pi_blocking, v->processor).list);
spin_unlock_irqrestore(pi_blocking_list_lock, flags);
@@ -186,6 +190,8 @@ static void vmx_pi_unblock_vcpu(struct vcpu *v)
{
ASSERT(v->arch.hvm_vmx.pi_blocking.lock == pi_blocking_list_lock);
list_del(&v->arch.hvm_vmx.pi_blocking.list);
+ atomic_dec(&container_of(pi_blocking_list_lock,
+ struct vmx_pi_blocking_vcpu, lock)->counter);
v->arch.hvm_vmx.pi_blocking.lock = NULL;
}
@@ -234,6 +240,7 @@ void vmx_pi_desc_fixup(unsigned int cpu)
if ( pi_test_on(&vmx->pi_desc) )
{
list_del(&vmx->pi_blocking.list);
+ atomic_dec(&per_cpu(vmx_pi_blocking, cpu).counter);
vmx->pi_blocking.lock = NULL;
vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx));
}
@@ -2360,7 +2367,7 @@ static void pi_wakeup_interrupt(struct cpu_user_regs *regs)
struct arch_vmx_struct *vmx, *tmp;
spinlock_t *lock = &per_cpu(vmx_pi_blocking, smp_processor_id()).lock;
struct list_head *blocked_vcpus =
         &per_cpu(vmx_pi_blocking, smp_processor_id()).list;
ack_APIC_irq();
this_cpu(irq_count)++;
@@ -2377,6 +2384,7 @@ static void pi_wakeup_interrupt(struct cpu_user_regs *regs)
if ( pi_test_on(&vmx->pi_desc) )
{
list_del(&vmx->pi_blocking.list);
+ atomic_dec(&per_cpu(vmx_pi_blocking, smp_processor_id()).counter);
ASSERT(vmx->pi_blocking.lock == lock);
vmx->pi_blocking.lock = NULL;
vcpu_unblock(container_of(vmx, struct vcpu, arch.hvm_vmx));
@@ -54,6 +54,7 @@
#define DO_TRC_HVM_TRAP DEFAULT_HVM_MISC
#define DO_TRC_HVM_TRAP_DEBUG DEFAULT_HVM_MISC
#define DO_TRC_HVM_VLAPIC DEFAULT_HVM_MISC
+#define DO_TRC_HVM_VT_D_PI_BLOCK DEFAULT_HVM_MISC
#define TRC_PAR_LONG(par) ((par)&0xFFFFFFFF),((par)>>32)
@@ -234,6 +234,7 @@
#define TRC_HVM_TRAP (TRC_HVM_HANDLER + 0x23)
#define TRC_HVM_TRAP_DEBUG (TRC_HVM_HANDLER + 0x24)
#define TRC_HVM_VLAPIC (TRC_HVM_HANDLER + 0x25)
+#define TRC_HVM_VT_D_PI_BLOCK (TRC_HVM_HANDLER + 0x26)
#define TRC_HVM_IOPORT_WRITE (TRC_HVM_HANDLER + 0x216)
#define TRC_HVM_IOMEM_WRITE (TRC_HVM_HANDLER + 0x217)
Add TRC_HVM_VT_D_PI_BLOCK to trace adding an entry to the per-pCPU blocking list. Also introduce a 'counter' field to track the number of entries in the list. Signed-off-by: Chao Gao <chao.gao@intel.com> --- tools/xentrace/formats | 1 + xen/arch/x86/hvm/vmx/vmx.c | 10 +++++++++- xen/include/asm-x86/hvm/trace.h | 1 + xen/include/public/trace.h | 1 + 4 files changed, 12 insertions(+), 1 deletion(-)