From patchwork Thu Aug 29 10:18:42 2019
From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Date: Thu, 29 Aug 2019 12:18:42 +0200
Message-Id: <20190829101846.21369-2-jgross@suse.com>
In-Reply-To: <20190829101846.21369-1-jgross@suse.com>
References: <20190829101846.21369-1-jgross@suse.com>
Subject: [Xen-devel] [PATCH v3 1/5] xen/spinlocks: in debug builds store cpu holding the lock
Cc: Juergen Gross, Stefano Stabellini, Wei Liu, Konrad Rzeszutek Wilk,
 George Dunlap, Andrew Cooper, Ian Jackson, Tim Deegan, Julien Grall,
 Jan Beulich

Add the cpu currently holding the lock to struct lock_debug. This makes
analysis of locking errors easier, and it can now be checked whether the
correct cpu is releasing a lock again.
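The patch packs the owning cpu, the IRQ-safety state and an "unseen" marker
into a 16-bit union lock_debug, and got_lock()/rel_lock() record and check the
owner on acquire and release. Below is a minimal standalone sketch of that
idea; smp_processor_id(), ASSERT() and the surrounding lock are replaced by
illustrative stand-ins, so this only models the hunks that follow rather than
reproducing them.

/*
 * Standalone sketch of the owner-tracking idea from this patch. The
 * fake_processor_id()/assert() stand-ins are illustrative only; the
 * authoritative definitions are in the hunks below.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SPINLOCK_NO_CPU 0xfffu

union lock_debug {
    uint16_t val;
#define LOCK_DEBUG_INITVAL 0xffff
    struct {
        uint16_t cpu:12;      /* cpu currently holding the lock */
        uint16_t pad:2;
        bool irq_safe:1;      /* lock used in IRQ context? */
        bool unseen:1;        /* lock not yet taken */
    };
};

static unsigned int fake_processor_id(void) { return 3; }   /* stand-in */

static void got_lock(union lock_debug *debug)
{
    debug->cpu = fake_processor_id();
}

static void rel_lock(union lock_debug *debug)
{
    /* Catches a lock being released on a different cpu than it was taken. */
    assert(debug->cpu == fake_processor_id());
    debug->cpu = SPINLOCK_NO_CPU;
}

int main(void)
{
    union lock_debug d = { LOCK_DEBUG_INITVAL };

    got_lock(&d);
    printf("lock held by cpu %u\n", (unsigned)d.cpu);
    rel_lock(&d);
    return 0;
}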
Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2: adjust types (Jan Beulich)
---
 xen/common/spinlock.c      | 33 ++++++++++++++++++++++++++-------
 xen/include/xen/spinlock.h | 17 ++++++++++++-----
 2 files changed, 38 insertions(+), 12 deletions(-)

diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 6bc52d70c0..1be1b5ebe6 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -13,9 +13,9 @@
 
 static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
 
-static void check_lock(struct lock_debug *debug)
+static void check_lock(union lock_debug *debug)
 {
-    int irq_safe = !local_irq_is_enabled();
+    bool irq_safe = !local_irq_is_enabled();
 
     if ( unlikely(atomic_read(&spin_debug) <= 0) )
         return;
@@ -43,18 +43,21 @@ static void check_lock(struct lock_debug *debug)
      */
     if ( unlikely(debug->irq_safe != irq_safe) )
     {
-        int seen = cmpxchg(&debug->irq_safe, -1, irq_safe);
+        union lock_debug seen, new = { 0 };
 
-        if ( seen == !irq_safe )
+        new.irq_safe = irq_safe;
+        seen.val = cmpxchg(&debug->val, LOCK_DEBUG_INITVAL, new.val);
+
+        if ( !seen.unseen && seen.irq_safe == !irq_safe )
         {
             printk("CHECKLOCK FAILURE: prev irqsafe: %d, curr irqsafe %d\n",
-                   seen, irq_safe);
+                   seen.irq_safe, irq_safe);
             BUG();
         }
     }
 }
 
-static void check_barrier(struct lock_debug *debug)
+static void check_barrier(union lock_debug *debug)
 {
     if ( unlikely(atomic_read(&spin_debug) <= 0) )
         return;
@@ -70,7 +73,18 @@ static void check_barrier(struct lock_debug *debug)
      * However, if we spin on an IRQ-unsafe lock with IRQs disabled then that
      * is clearly wrong, for the same reason outlined in check_lock() above.
      */
-    BUG_ON(!local_irq_is_enabled() && (debug->irq_safe == 0));
+    BUG_ON(!local_irq_is_enabled() && !debug->irq_safe);
+}
+
+static void got_lock(union lock_debug *debug)
+{
+    debug->cpu = smp_processor_id();
+}
+
+static void rel_lock(union lock_debug *debug)
+{
+    ASSERT(debug->cpu == smp_processor_id());
+    debug->cpu = SPINLOCK_NO_CPU;
 }
 
 void spin_debug_enable(void)
@@ -87,6 +101,8 @@ void spin_debug_disable(void)
 
 #define check_lock(l) ((void)0)
 #define check_barrier(l) ((void)0)
+#define got_lock(l) ((void)0)
+#define rel_lock(l) ((void)0)
 
 #endif
@@ -150,6 +166,7 @@ void inline _spin_lock_cb(spinlock_t *lock, void (*cb)(void *), void *data)
             cb(data);
         arch_lock_relax();
     }
+    got_lock(&lock->debug);
     LOCK_PROFILE_GOT;
     preempt_disable();
     arch_lock_acquire_barrier();
@@ -181,6 +198,7 @@ void _spin_unlock(spinlock_t *lock)
     arch_lock_release_barrier();
     preempt_enable();
     LOCK_PROFILE_REL;
+    rel_lock(&lock->debug);
     add_sized(&lock->tickets.head, 1);
     arch_lock_signal();
 }
@@ -224,6 +242,7 @@ int _spin_trylock(spinlock_t *lock)
     if ( cmpxchg(&lock->tickets.head_tail,
                  old.head_tail, new.head_tail) != old.head_tail )
         return 0;
+    got_lock(&lock->debug);
 #ifdef CONFIG_LOCK_PROFILE
     if (lock->profile)
         lock->profile->time_locked = NOW();
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 2c7415e23a..88017e0a89 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -6,14 +6,21 @@
 #include
 
 #ifndef NDEBUG
-struct lock_debug {
-    s16 irq_safe; /* +1: IRQ-safe; 0: not IRQ-safe; -1: don't know yet */
+union lock_debug {
+    uint16_t val;
+#define LOCK_DEBUG_INITVAL 0xffff
+    struct {
+        uint16_t cpu:12;
+        uint16_t pad:2;
+        bool irq_safe:1;
+        bool unseen:1;
+    };
 };
-#define _LOCK_DEBUG { -1 }
+#define _LOCK_DEBUG { LOCK_DEBUG_INITVAL }
 void spin_debug_enable(void);
 void spin_debug_disable(void);
 #else
-struct lock_debug { };
+union lock_debug { };
 #define _LOCK_DEBUG { }
 #define spin_debug_enable() ((void)0)
 #define spin_debug_disable() ((void)0)
@@ -142,7 +149,7 @@ typedef struct spinlock {
 #define SPINLOCK_NO_CPU 0xfffu
     u16 recurse_cnt:4;
 #define SPINLOCK_MAX_RECURSE 0xfu
-    struct lock_debug debug;
+    union lock_debug debug;
 #ifdef CONFIG_LOCK_PROFILE
     struct lock_profile *profile;
 #endif

From patchwork Thu Aug 29 10:18:43 2019
From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Date: Thu, 29 Aug 2019 12:18:43 +0200
Message-Id: <20190829101846.21369-3-jgross@suse.com>
In-Reply-To: <20190829101846.21369-1-jgross@suse.com>
References: <20190829101846.21369-1-jgross@suse.com>
Subject: [Xen-devel] [PATCH v3 2/5] xen: add new CONFIG_DEBUG_LOCKS option
Cc: Juergen Gross, Stefano Stabellini, Wei Liu, Konrad Rzeszutek Wilk,
 George Dunlap, Andrew Cooper, Ian Jackson, Tim Deegan, Julien Grall,
 Jan Beulich

Instead of enabling lock debugging for debug builds only, add a dedicated
Kconfig option for that purpose, defaulting to DEBUG.

Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Jan Beulich
---
V2:
- rename to CONFIG_DEBUG_LOCKS (Jan Beulich)
---
 xen/Kconfig.debug          | 7 +++++++
 xen/common/spinlock.c      | 4 ++--
 xen/include/xen/spinlock.h | 2 +-
 3 files changed, 10 insertions(+), 3 deletions(-)

diff --git a/xen/Kconfig.debug b/xen/Kconfig.debug
index e10e314e25..1faaa3ba6a 100644
--- a/xen/Kconfig.debug
+++ b/xen/Kconfig.debug
@@ -51,6 +51,13 @@ config LOCK_PROFILE
	  You can use serial console to print (and reset) using 'l' and 'L'
	  respectively, or the 'xenlockprof' tool.
+config DEBUG_LOCKS
+	bool "Lock debugging"
+	default DEBUG
+	---help---
+	  Enable debugging features of lock handling.  Some additional
+	  checks will be performed when acquiring and releasing locks.
+
 config PERF_COUNTERS
	bool "Performance Counters"
	---help---
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 1be1b5ebe6..79e70a9947 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -9,7 +9,7 @@
 #include
 #include
 
-#ifndef NDEBUG
+#ifdef CONFIG_DEBUG_LOCKS
 
 static atomic_t spin_debug __read_mostly = ATOMIC_INIT(0);
@@ -97,7 +97,7 @@ void spin_debug_disable(void)
     atomic_dec(&spin_debug);
 }
 
-#else /* defined(NDEBUG) */
+#else /* CONFIG_DEBUG_LOCKS */
 
 #define check_lock(l) ((void)0)
 #define check_barrier(l) ((void)0)
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 88017e0a89..736490f52b 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -5,7 +5,7 @@
 #include
 #include
 
-#ifndef NDEBUG
+#ifdef CONFIG_DEBUG_LOCKS
 union lock_debug {
     uint16_t val;
 #define LOCK_DEBUG_INITVAL 0xffff

From patchwork Thu Aug 29 10:18:44 2019
From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Date: Thu, 29 Aug 2019 12:18:44 +0200
Message-Id: <20190829101846.21369-4-jgross@suse.com>
In-Reply-To: <20190829101846.21369-1-jgross@suse.com>
References: <20190829101846.21369-1-jgross@suse.com>
Subject: [Xen-devel] [PATCH v3 3/5] xen: print lock profile info in panic()
Cc: Juergen Gross, Stefano Stabellini, Wei Liu, Konrad Rzeszutek Wilk,
 George Dunlap, Andrew Cooper, Ian Jackson, Tim Deegan, Julien Grall,
 Jan Beulich, Volodymyr Babchuk, Roger Pau Monné
Print the lock profile data when the system crashes and add some more
information for each lock (lock address, cpu holding the lock). This is
especially beneficial for watchdog-triggered crashes in case of deadlocks.
While at it use the PRI_stime format specifier for printing time data.

In order to have the cpu holding the lock available, let the lock profile
config option select DEBUG_LOCKS.

As printing the lock profile data will make use of locking, too, spinlock
debugging needs to be disabled before calling spinlock_profile_printall()
from panic().

While at it remove a superfluous #ifdef CONFIG_LOCK_PROFILE and rename
CONFIG_LOCK_PROFILE to CONFIG_DEBUG_LOCK_PROFILE.

Also move the .lockprofile.data section to the init area in the linker
scripts, as the data is no longer needed after boot.

Signed-off-by: Juergen Gross <jgross@suse.com>
Acked-by: Jan Beulich
---
V2:
- rename CONFIG_LOCK_PROFILE to CONFIG_DEBUG_LOCK_PROFILE (Jan Beulich)
- move .lockprofile.data section to init area in linker scripts
- use PRI_stime (Andrew Cooper)
- don't print "cpu=4095", but "not locked" (Andrew Cooper)
---
 xen/Kconfig.debug          |  3 ++-
 xen/arch/arm/xen.lds.S     | 13 +++++++------
 xen/arch/x86/domain.c      |  2 +-
 xen/arch/x86/xen.lds.S     | 13 +++++++------
 xen/common/keyhandler.c    |  2 +-
 xen/common/spinlock.c      | 33 ++++++++++++++++++---------------
 xen/common/sysctl.c        |  2 +-
 xen/drivers/char/console.c |  4 +++-
 xen/include/xen/spinlock.h | 12 +++++++-----
 9 files changed, 47 insertions(+), 37 deletions(-)

diff --git a/xen/Kconfig.debug b/xen/Kconfig.debug
index 1faaa3ba6a..22573e74db 100644
--- a/xen/Kconfig.debug
+++ b/xen/Kconfig.debug
@@ -44,8 +44,9 @@ config COVERAGE
 
	  If unsure, say N here.
 
-config LOCK_PROFILE
+config DEBUG_LOCK_PROFILE
	bool "Lock Profiling"
+	select DEBUG_LOCKS
	---help---
	  Lock profiling allows you to see how often locks are taken and blocked.
	  You can use serial console to print (and reset) using 'l' and 'L'
diff --git a/xen/arch/arm/xen.lds.S b/xen/arch/arm/xen.lds.S
index 16ce1dd01e..a497f6a48d 100644
--- a/xen/arch/arm/xen.lds.S
+++ b/xen/arch/arm/xen.lds.S
@@ -54,12 +54,6 @@ SECTIONS
        *(.data.rel.ro)
        *(.data.rel.ro.*)
 
-#ifdef CONFIG_LOCK_PROFILE
-       . = ALIGN(POINTER_ALIGN);
-       __lock_profile_start = .;
-       *(.lockprofile.data)
-       __lock_profile_end = .;
-#endif
        . = ALIGN(POINTER_ALIGN);
        __param_start = .;
        *(.data.param)
@@ -173,6 +167,13 @@ SECTIONS
        . = ALIGN(4);
        *(.altinstr_replacement)
 
+#ifdef CONFIG_DEBUG_LOCK_PROFILE
+       . = ALIGN(POINTER_ALIGN);
+       __lock_profile_start = .;
+       *(.lockprofile.data)
+       __lock_profile_end = .;
+#endif
+
        *(.init.data)
        *(.init.data.rel)
        *(.init.data.rel.*)
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index bc082ed827..b1c67c2d28 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -305,7 +305,7 @@ struct domain *alloc_domain_struct(void)
 #endif
 
-#ifndef CONFIG_LOCK_PROFILE
+#ifndef CONFIG_DEBUG_LOCK_PROFILE
     BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE);
 #endif
     d = alloc_xenheap_pages(order, MEMF_bits(bits));
diff --git a/xen/arch/x86/xen.lds.S b/xen/arch/x86/xen.lds.S
index 87fa02b9b5..111edb5360 100644
--- a/xen/arch/x86/xen.lds.S
+++ b/xen/arch/x86/xen.lds.S
@@ -128,12 +128,6 @@ SECTIONS
        *(.ex_table.pre)
        __stop___pre_ex_table = .;
 
-#ifdef CONFIG_LOCK_PROFILE
-       . = ALIGN(POINTER_ALIGN);
-       __lock_profile_start = .;
-       *(.lockprofile.data)
-       __lock_profile_end = .;
-#endif
        . = ALIGN(POINTER_ALIGN);
        __param_start = .;
        *(.data.param)
@@ -251,6 +245,13 @@ SECTIONS
        *(.altinstructions)
        __alt_instructions_end = .;
 
+#ifdef CONFIG_DEBUG_LOCK_PROFILE
+       . = ALIGN(POINTER_ALIGN);
+       __lock_profile_start = .;
+       *(.lockprofile.data)
+       __lock_profile_end = .;
+#endif
+
        . = ALIGN(8);
        __ctors_start = .;
        *(.ctors)
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index 57b360ee4b..c36baa4dff 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -62,7 +62,7 @@ static struct keyhandler {
     KEYHANDLER('P', perfc_reset, "reset performance counters", 0),
 #endif
 
-#ifdef CONFIG_LOCK_PROFILE
+#ifdef CONFIG_DEBUG_LOCK_PROFILE
     KEYHANDLER('l', spinlock_profile_printall, "print lock profile info", 1),
     KEYHANDLER('L', spinlock_profile_reset, "reset lock profile info", 0),
 #endif
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index 79e70a9947..c4f706c627 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -106,7 +106,7 @@ void spin_debug_disable(void)
 #endif
 
-#ifdef CONFIG_LOCK_PROFILE
+#ifdef CONFIG_DEBUG_LOCK_PROFILE
 
 #define LOCK_PROFILE_REL                                                     \
     if (lock->profile)                                                       \
@@ -243,7 +243,7 @@ int _spin_trylock(spinlock_t *lock)
                  old.head_tail, new.head_tail) != old.head_tail )
         return 0;
     got_lock(&lock->debug);
-#ifdef CONFIG_LOCK_PROFILE
+#ifdef CONFIG_DEBUG_LOCK_PROFILE
     if (lock->profile)
         lock->profile->time_locked = NOW();
 #endif
@@ -258,7 +258,7 @@ int _spin_trylock(spinlock_t *lock)
 void _spin_barrier(spinlock_t *lock)
 {
     spinlock_tickets_t sample;
-#ifdef CONFIG_LOCK_PROFILE
+#ifdef CONFIG_DEBUG_LOCK_PROFILE
     s_time_t block = NOW();
 #endif
@@ -269,7 +269,7 @@ void _spin_barrier(spinlock_t *lock)
     {
         while ( observe_head(&lock->tickets) == sample.head )
             arch_lock_relax();
-#ifdef CONFIG_LOCK_PROFILE
+#ifdef CONFIG_DEBUG_LOCK_PROFILE
         if ( lock->profile )
         {
             lock->profile->time_block += NOW() - block;
@@ -327,7 +327,7 @@ void _spin_unlock_recursive(spinlock_t *lock)
     }
 }
 
-#ifdef CONFIG_LOCK_PROFILE
+#ifdef CONFIG_DEBUG_LOCK_PROFILE
 
 struct lock_profile_anc {
     struct lock_profile_qhead *head_q; /* first head of this type */
@@ -362,14 +362,19 @@ static void spinlock_profile_iterate(lock_profile_subfunc *sub, void *par)
 static void spinlock_profile_print_elem(struct lock_profile *data,
     int32_t type, int32_t idx, void *par)
 {
-    if ( type == LOCKPROF_TYPE_GLOBAL )
-        printk("%s %s:\n", lock_profile_ancs[type].name, data->name);
+    struct spinlock *lock = data->lock;
+
+    printk("%s ", lock_profile_ancs[type].name);
+    if ( type != LOCKPROF_TYPE_GLOBAL )
+        printk("%d ", idx);
+    printk("%s: addr=%p, lockval=%08x, ", data->name, lock,
+           lock->tickets.head_tail);
+    if ( lock->debug.cpu == SPINLOCK_NO_CPU )
+        printk("not locked\n");
     else
-        printk("%s %d %s:\n", lock_profile_ancs[type].name, idx, data->name);
-    printk("  lock:%12"PRId64"(%08X:%08X), block:%12"PRId64"(%08X:%08X)\n",
-           data->lock_cnt, (u32)(data->time_hold >> 32), (u32)data->time_hold,
-           data->block_cnt, (u32)(data->time_block >> 32),
-           (u32)data->time_block);
+        printk("cpu=%d\n", lock->debug.cpu);
+    printk("  lock:%" PRId64 "(%" PRI_stime "), block:%" PRId64 "(%" PRI_stime ")\n",
+           data->lock_cnt, data->time_hold, data->block_cnt, data->time_block);
 }
 
 void spinlock_profile_printall(unsigned char key)
@@ -488,7 +493,6 @@ void _lock_profile_deregister_struct(
 
     spin_unlock(&lock_profile_lock);
 }
 
-#ifdef CONFIG_LOCK_PROFILE
 static int __init lock_prof_init(void)
 {
     struct lock_profile **q;
@@ -507,6 +511,5 @@ static int __init lock_prof_init(void)
 
     return 0;
 }
 __initcall(lock_prof_init);
-#endif
 
-#endif /* LOCK_PROFILE */
+#endif /* CONFIG_DEBUG_LOCK_PROFILE */
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index fcf2d2fd7c..9db94754d3 100644
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -119,7 +119,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
         break;
 #endif
 
-#ifdef CONFIG_LOCK_PROFILE
+#ifdef CONFIG_DEBUG_LOCK_PROFILE
     case XEN_SYSCTL_lockprof_op:
         ret = spinlock_profile_control(&op->u.lockprof_op);
         break;
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index 7f29190eaf..e133534be7 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -1170,7 +1170,9 @@ void panic(const char *fmt, ...)
     unsigned long flags;
     static DEFINE_SPINLOCK(lock);
     static char buf[128];
-
+
+    spin_debug_disable();
+    spinlock_profile_printall('\0');
     debugtrace_dump();
 
     /* Protects buf[] and ensure multi-line message prints atomically. */
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index 736490f52b..a7c1967ec7 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -1,6 +1,7 @@
 #ifndef __SPINLOCK_H__
 #define __SPINLOCK_H__
 
+#include
 #include
 #include
 #include
@@ -26,7 +27,7 @@ union lock_debug { };
 #define spin_debug_disable() ((void)0)
 #endif
 
-#ifdef CONFIG_LOCK_PROFILE
+#ifdef CONFIG_DEBUG_LOCK_PROFILE
 
 #include
@@ -72,9 +73,9 @@ struct lock_profile {
     struct spinlock *lock;     /* the lock itself */
     u64 lock_cnt;              /* # of complete locking ops */
     u64 block_cnt;             /* # of complete wait for lock */
-    s64 time_hold;             /* cumulated lock time */
-    s64 time_block;            /* cumulated wait time */
-    s64 time_locked;           /* system time of last locking */
+    s_time_t time_hold;        /* cumulated lock time */
+    s_time_t time_block;       /* cumulated wait time */
+    s_time_t time_locked;      /* system time of last locking */
 };
 
 struct lock_profile_qhead {
@@ -130,6 +131,7 @@ struct lock_profile_qhead { };
 #define spin_lock_init_prof(s, l) spin_lock_init(&((s)->l))
 #define lock_profile_register_struct(type, ptr, idx, print)
 #define lock_profile_deregister_struct(type, ptr)
+#define spinlock_profile_printall(key)
 
 #endif
@@ -150,7 +152,7 @@ typedef struct spinlock {
     u16 recurse_cnt:4;
 #define SPINLOCK_MAX_RECURSE 0xfu
     union lock_debug debug;
-#ifdef CONFIG_LOCK_PROFILE
+#ifdef CONFIG_DEBUG_LOCK_PROFILE
     struct lock_profile *profile;
 #endif
 } spinlock_t;
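The ordering in the panic() hunk above matters: spinlock_profile_printall()
takes locks itself, so spin_debug_disable() has to run first. A small
standalone model of that ordering; the functions here are stand-ins, not the
Xen implementations.

#include <stdbool.h>
#include <stdio.h>

static bool lock_checks_enabled = true;

static void spin_debug_disable_model(void) { lock_checks_enabled = false; }

static void dump_lock_profiles_model(void)
{
    /* In Xen this walks all registered locks, taking lock_profile_lock. */
    printf("lock profile dump (debug checks %s)\n",
           lock_checks_enabled ? "on" : "off");
}

static void panic_model(const char *msg)
{
    spin_debug_disable_model();   /* must come first ...                 */
    dump_lock_profiles_model();   /* ... so this can take locks safely   */
    printf("PANIC: %s\n", msg);
}

int main(void)
{
    panic_model("example crash");
    return 0;
}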
From patchwork Thu Aug 29 10:18:45 2019
From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Date: Thu, 29 Aug 2019 12:18:45 +0200
Message-Id: <20190829101846.21369-5-jgross@suse.com>
In-Reply-To: <20190829101846.21369-1-jgross@suse.com>
References: <20190829101846.21369-1-jgross@suse.com>
Subject: [Xen-devel] [PATCH v3 4/5] xen: modify lock profiling interface
Cc: Juergen Gross, Stefano Stabellini, Wei Liu, Konrad Rzeszutek Wilk,
 George Dunlap, Andrew Cooper, Ian Jackson, Tim Deegan, Julien Grall,
 Jan Beulich

Today, adding locks located in a struct to lock profiling requires a unique
type index for each structure. This makes it hard to add any new structure,
as the related sysctl interface needs to be changed, too.

Instead of using an index, the struct name already specified in
lock_profile_register_struct() can be used as the identifier. Modify the
sysctl interface to use the struct name instead of the type index and adapt
the related coding accordingly.

Instead of an array of struct anchors for lock profiling, a linked list is
now used for that purpose.

Use the special idx value -1 for cases where the idx isn't relevant (global
locks) and shouldn't be printed.

Increment the sysctl.h interface version, as an interface is being modified.

Add the missing setting of the profiling time in xc_lockprof_query().

Add freeing of the element data when deregistering a structure.
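The heart of the change is that the per-type anchor is found (or created on
demand) by comparing the name string passed to lock_profile_register_struct(),
instead of indexing a fixed LOCKPROF_TYPE_* array. A simplified standalone
model of that name-keyed registration follows; the types and the nr_structs
counter are illustrative only, not the Xen definitions, which appear in the
hunks below.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct anc {                 /* one anchor per struct name, e.g. "Domain" */
    struct anc *next;
    const char *name;
    int nr_structs;          /* stands in for the per-type qhead list */
};

static struct anc *ancs;     /* replaces the old fixed-size type array */

static struct anc *find_anc(const char *name)
{
    struct anc *a;

    for ( a = ancs; a; a = a->next )
        if ( !strcmp(a->name, name) )
            return a;
    return NULL;
}

/* Register a structure under its name, creating the anchor on first use. */
static int register_struct(const char *name)
{
    struct anc *a = find_anc(name);

    if ( !a )
    {
        a = calloc(1, sizeof(*a));   /* may fail: caller must tolerate it */
        if ( !a )
            return -1;
        a->name = name;
        a->next = ancs;
        ancs = a;
    }
    a->nr_structs++;
    return 0;
}

int main(void)
{
    register_struct("Domain");
    register_struct("Domain");
    register_struct("Global");

    for ( struct anc *a = ancs; a; a = a->next )
        printf("%s: %d structure(s)\n", a->name, a->nr_structs);
    return 0;
}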
Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- add comment about memory allocation needs (Jan Beulich)
- check success of memory allocation (Jan Beulich)
---
 tools/libxc/xc_misc.c       |  1 +
 tools/misc/xenlockprof.c    | 17 ++------
 xen/common/domain.c         |  4 +-
 xen/common/spinlock.c       | 95 ++++++++++++++++++++++++++++++++-------------
 xen/include/public/sysctl.h | 11 ++----
 xen/include/xen/spinlock.h  | 29 ++++++++------
 6 files changed, 95 insertions(+), 62 deletions(-)

diff --git a/tools/libxc/xc_misc.c b/tools/libxc/xc_misc.c
index 8e60b6e9f0..22708f1b1f 100644
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -533,6 +533,7 @@ int xc_lockprof_query(xc_interface *xch,
     rc = do_sysctl(xch, &sysctl);
 
     *n_elems = sysctl.u.lockprof_op.nr_elem;
+    *time = sysctl.u.lockprof_op.time;
 
     return rc;
 }
diff --git a/tools/misc/xenlockprof.c b/tools/misc/xenlockprof.c
index 11f43a35e3..c6aa3fe841 100644
--- a/tools/misc/xenlockprof.c
+++ b/tools/misc/xenlockprof.c
@@ -88,19 +88,10 @@ int main(int argc, char *argv[])
     sb = 0;
     for ( j = 0; j < i; j++ )
     {
-        switch ( data[j].type )
-        {
-        case LOCKPROF_TYPE_GLOBAL:
-            sprintf(name, "global lock %s", data[j].name);
-            break;
-        case LOCKPROF_TYPE_PERDOM:
-            sprintf(name, "domain %d lock %s", data[j].idx, data[j].name);
-            break;
-        default:
-            sprintf(name, "unknown type(%d) %d lock %s", data[j].type,
-                    data[j].idx, data[j].name);
-            break;
-        }
+        if ( data[j].idx == LOCKPROF_IDX_NONE )
+            sprintf(name, "%s %s", data[j].type, data[j].name);
+        else
+            sprintf(name, "%s %d %s", data[j].type, data[j].idx, data[j].name);
         l = (double)(data[j].lock_time) / 1E+09;
         b = (double)(data[j].block_time) / 1E+09;
         sl += l;
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 1f7a449f14..84749a28c8 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -290,7 +290,7 @@ static void _domain_destroy(struct domain *d)
 
     xsm_free_security_domain(d);
 
-    lock_profile_deregister_struct(LOCKPROF_TYPE_PERDOM, d);
+    lock_profile_deregister_struct(d, "Domain");
 
     free_domain_struct(d);
 }
@@ -369,7 +369,7 @@ struct domain *domain_create(domid_t domid,
         d->max_vcpus = config->max_vcpus;
     }
 
-    lock_profile_register_struct(LOCKPROF_TYPE_PERDOM, d, domid, "Domain");
+    lock_profile_register_struct(d, domid, "Domain");
 
     if ( (err = xsm_alloc_security_domain(d)) != 0 )
         goto fail;
diff --git a/xen/common/spinlock.c b/xen/common/spinlock.c
index c4f706c627..cbde896ae6 100644
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -330,42 +330,43 @@ void _spin_unlock_recursive(spinlock_t *lock)
 #ifdef CONFIG_DEBUG_LOCK_PROFILE
 
 struct lock_profile_anc {
+    struct lock_profile_anc *next;     /* next type */
     struct lock_profile_qhead *head_q; /* first head of this type */
-    char *name;                        /* descriptive string for print */
+    const char *name;                  /* descriptive string for print */
 };
 
 typedef void lock_profile_subfunc(
-    struct lock_profile *, int32_t, int32_t, void *);
+    struct lock_profile *, const char *, int32_t, void *);
 
 extern struct lock_profile *__lock_profile_start;
 extern struct lock_profile *__lock_profile_end;
 
 static s_time_t lock_profile_start;
-static struct lock_profile_anc lock_profile_ancs[LOCKPROF_TYPE_N];
+static struct lock_profile_anc *lock_profile_ancs;
 static struct lock_profile_qhead lock_profile_glb_q;
 static spinlock_t lock_profile_lock = SPIN_LOCK_UNLOCKED;
 
 static void spinlock_profile_iterate(lock_profile_subfunc *sub, void *par)
 {
-    int i;
+    struct lock_profile_anc *anc;
     struct lock_profile_qhead *hq;
     struct lock_profile *eq;
 
     spin_lock(&lock_profile_lock);
-    for ( i = 0; i < LOCKPROF_TYPE_N; i++ )
-        for ( hq = lock_profile_ancs[i].head_q; hq; hq = hq->head_q )
+    for ( anc = lock_profile_ancs; anc; anc = anc->next )
+        for ( hq = anc->head_q; hq; hq = hq->head_q )
             for ( eq = hq->elem_q; eq; eq = eq->next )
-                sub(eq, i, hq->idx, par);
+                sub(eq, anc->name, hq->idx, par);
     spin_unlock(&lock_profile_lock);
 }
 
 static void spinlock_profile_print_elem(struct lock_profile *data,
-    int32_t type, int32_t idx, void *par)
+    const char *type, int32_t idx, void *par)
 {
     struct spinlock *lock = data->lock;
 
-    printk("%s ", lock_profile_ancs[type].name);
-    if ( type != LOCKPROF_TYPE_GLOBAL )
+    printk("%s ", type);
+    if ( idx != LOCKPROF_IDX_NONE )
         printk("%d ", idx);
     printk("%s: addr=%p, lockval=%08x, ", data->name, lock,
            lock->tickets.head_tail);
@@ -389,7 +390,7 @@ void spinlock_profile_printall(unsigned char key)
 }
 
 static void spinlock_profile_reset_elem(struct lock_profile *data,
-    int32_t type, int32_t idx, void *par)
+    const char *type, int32_t idx, void *par)
 {
     data->lock_cnt = 0;
     data->block_cnt = 0;
@@ -413,7 +414,7 @@ typedef struct {
 } spinlock_profile_ucopy_t;
 
 static void spinlock_profile_ucopy_elem(struct lock_profile *data,
-    int32_t type, int32_t idx, void *par)
+    const char *type, int32_t idx, void *par)
 {
     spinlock_profile_ucopy_t *p = par;
     struct xen_sysctl_lockprof_data elem;
@@ -424,7 +425,7 @@ static void spinlock_profile_ucopy_elem(struct lock_profile *data,
     if ( p->pc->nr_elem < p->pc->max_elem )
     {
         safe_strcpy(elem.name, data->name);
-        elem.type = type;
+        safe_strcpy(elem.type, type);
         elem.idx = idx;
         elem.lock_cnt = data->lock_cnt;
         elem.block_cnt = data->block_cnt;
@@ -465,31 +466,70 @@ int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc)
     return rc;
 }
 
-void _lock_profile_register_struct(
-    int32_t type, struct lock_profile_qhead *qhead, int32_t idx, char *name)
+static struct lock_profile_anc *find_prof_anc(const char *name)
 {
-    qhead->idx = idx;
+    struct lock_profile_anc *anc;
+
+    for ( anc = lock_profile_ancs; anc; anc = anc->next )
+        if ( !strcmp(anc->name, name) )
+            return anc;
+    return NULL;
+}
+
+void _lock_profile_register_struct(struct lock_profile_qhead *qhead,
+                                   int32_t idx, const char *name)
+{
+    struct lock_profile_anc *anc;
+
     spin_lock(&lock_profile_lock);
-    qhead->head_q = lock_profile_ancs[type].head_q;
-    lock_profile_ancs[type].head_q = qhead;
-    lock_profile_ancs[type].name = name;
+
+    anc = find_prof_anc(name);
+    if ( !anc )
+    {
+        anc = xzalloc(struct lock_profile_anc);
+        if ( !anc )
+            goto out;
+        anc->name = name;
+        anc->next = lock_profile_ancs;
+        lock_profile_ancs = anc;
+    }
+
+    qhead->idx = idx;
+    qhead->head_q = anc->head_q;
+    anc->head_q = qhead;
+
+ out:
     spin_unlock(&lock_profile_lock);
 }
 
-void _lock_profile_deregister_struct(
-    int32_t type, struct lock_profile_qhead *qhead)
+void _lock_profile_deregister_struct(struct lock_profile_qhead *qhead,
+                                     const char *name)
 {
+    struct lock_profile_anc *anc;
     struct lock_profile_qhead **q;
+    struct lock_profile *elem;
 
     spin_lock(&lock_profile_lock);
-    for ( q = &lock_profile_ancs[type].head_q; *q; q = &(*q)->head_q )
+
+    anc = find_prof_anc(name);
+    if ( anc )
     {
-        if ( *q == qhead )
+        for ( q = &anc->head_q; *q; q = &(*q)->head_q )
         {
-            *q = qhead->head_q;
-            break;
+            if ( *q == qhead )
+            {
+                *q = qhead->head_q;
+                while ( qhead->elem_q )
+                {
+                    elem = qhead->elem_q;
+                    qhead->elem_q = elem->next;
+                    xfree(elem);
+                }
+                break;
+            }
         }
     }
+
     spin_unlock(&lock_profile_lock);
 }
@@ -504,9 +544,8 @@ static int __init lock_prof_init(void)
         (*q)->lock->profile = *q;
     }
 
-    _lock_profile_register_struct(
-        LOCKPROF_TYPE_GLOBAL, &lock_profile_glb_q,
-        0, "Global lock");
+    _lock_profile_register_struct(&lock_profile_glb_q, LOCKPROF_IDX_NONE,
+                                  "Global");
 
     return 0;
 }
diff --git a/xen/include/public/sysctl.h b/xen/include/public/sysctl.h
index 91c48dcae0..fafce64f02 100644
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -35,7 +35,7 @@
 #include "domctl.h"
 #include "physdev.h"
 
-#define XEN_SYSCTL_INTERFACE_VERSION 0x00000012
+#define XEN_SYSCTL_INTERFACE_VERSION 0x00000013
 
 /*
  * Read console content from Xen buffer ring.
@@ -433,14 +433,11 @@ struct xen_sysctl_page_offline_op {
 /* Sub-operations: */
 #define XEN_SYSCTL_LOCKPROF_reset 1   /* Reset all profile data to zero. */
 #define XEN_SYSCTL_LOCKPROF_query 2   /* Get lock profile information. */
-/* Record-type: */
-#define LOCKPROF_TYPE_GLOBAL 0   /* global lock, idx meaningless */
-#define LOCKPROF_TYPE_PERDOM 1   /* per-domain lock, idx is domid */
-#define LOCKPROF_TYPE_N      2   /* number of types */
 struct xen_sysctl_lockprof_data {
-    char name[40];     /* lock name (may include up to 2 %d specifiers) */
-    int32_t type;      /* LOCKPROF_TYPE_??? */
+    char name[40];     /* lock name */
+    char type[20];     /* e.g. "domain" */
     int32_t idx;       /* index (e.g. domain id) */
+#define LOCKPROF_IDX_NONE -1
     uint64_aligned_t lock_cnt;   /* # of locking succeeded */
     uint64_aligned_t block_cnt;  /* # of wait for lock */
     uint64_aligned_t lock_time;  /* nsecs lock held */
diff --git a/xen/include/xen/spinlock.h b/xen/include/xen/spinlock.h
index a7c1967ec7..decaddd9fc 100644
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -50,19 +50,24 @@ union lock_debug { };
     with ptr being the main structure pointer and lock the spinlock field
 
+    It should be noted that this will need to allocate memory, so interrupts
+    must be enabled.
+
     - each structure has to be added to profiling with
 
-      lock_profile_register_struct(type, ptr, idx, print);
+      lock_profile_register_struct(ptr, idx, print);
 
       with:
-        type:  something like LOCKPROF_TYPE_PERDOM
        ptr:   pointer to the structure
        idx:   index of that structure, e.g. domid
        print: descriptive string like "domain"
 
+    It should be noted that this might need to allocate memory, so
+    interrupts must be enabled.
+
     - removing of a structure is done via
 
-      lock_profile_deregister_struct(type, ptr);
+      lock_profile_deregister_struct(ptr, print);
 */
 
 struct spinlock;
@@ -108,14 +113,14 @@ struct lock_profile_qhead {
         (s)->profile_head.elem_q = prof;                                   \
     } while(0)
 
-void _lock_profile_register_struct(
-    int32_t, struct lock_profile_qhead *, int32_t, char *);
-void _lock_profile_deregister_struct(int32_t, struct lock_profile_qhead *);
+void _lock_profile_register_struct(struct lock_profile_qhead *, int32_t,
+                                   const char *);
+void _lock_profile_deregister_struct(struct lock_profile_qhead *, const char *);
 
-#define lock_profile_register_struct(type, ptr, idx, print)                \
-    _lock_profile_register_struct(type, &((ptr)->profile_head), idx, print)
-#define lock_profile_deregister_struct(type, ptr)                          \
-    _lock_profile_deregister_struct(type, &((ptr)->profile_head))
+#define lock_profile_register_struct(ptr, idx, print)                      \
+    _lock_profile_register_struct(&((ptr)->profile_head), idx, print)
+#define lock_profile_deregister_struct(ptr, print)                         \
+    _lock_profile_deregister_struct(&((ptr)->profile_head), print)
 
 extern int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc);
 extern void spinlock_profile_printall(unsigned char key);
@@ -129,8 +134,8 @@ struct lock_profile_qhead { };
 
 #define DEFINE_SPINLOCK(l) spinlock_t l = SPIN_LOCK_UNLOCKED
 #define spin_lock_init_prof(s, l) spin_lock_init(&((s)->l))
-#define lock_profile_register_struct(type, ptr, idx, print)
-#define lock_profile_deregister_struct(type, ptr)
+#define lock_profile_register_struct(ptr, idx, print)
+#define lock_profile_deregister_struct(ptr, print)
 #define spinlock_profile_printall(key)
 
 #endif

From patchwork Thu Aug 29 10:18:46 2019
From: Juergen Gross <jgross@suse.com>
To: xen-devel@lists.xenproject.org
Date: Thu, 29 Aug 2019 12:18:46 +0200
Message-Id: <20190829101846.21369-6-jgross@suse.com>
In-Reply-To: <20190829101846.21369-1-jgross@suse.com>
References: <20190829101846.21369-1-jgross@suse.com>
Subject: [Xen-devel] [PATCH v3 5/5] xen: modify several static locks to unique names
Cc: Juergen Gross, Stefano Stabellini, Wei Liu, Konrad Rzeszutek Wilk,
 George Dunlap, Andrew Cooper, Ian Jackson, Tim Deegan, Julien Grall,
 Jan Beulich, Roger Pau Monné

In order to have unique names when doing lock profiling, several local
locks named "lock" need to be renamed.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 xen/arch/x86/irq.c         | 6 +++---
 xen/arch/x86/time.c        | 6 +++---
 xen/common/keyhandler.c    | 6 +++---
 xen/common/perfc.c         | 6 +++---
 xen/common/trace.c         | 6 +++---
 xen/drivers/char/console.c | 6 +++---
 6 files changed, 18 insertions(+), 18 deletions(-)

diff --git a/xen/arch/x86/irq.c b/xen/arch/x86/irq.c
index 0ee33464d2..c348f9e6c9 100644
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -867,14 +867,14 @@ void set_direct_apic_vector(
 void alloc_direct_apic_vector(
     uint8_t *vector, void (*handler)(struct cpu_user_regs *))
 {
-    static DEFINE_SPINLOCK(lock);
+    static DEFINE_SPINLOCK(apic_alloc_lock);
 
-    spin_lock(&lock);
+    spin_lock(&apic_alloc_lock);
     if (*vector == 0) {
         *vector = alloc_hipriority_vector();
         set_direct_apic_vector(*vector, handler);
     }
-    spin_unlock(&lock);
+    spin_unlock(&apic_alloc_lock);
 }
 
 void do_IRQ(struct cpu_user_regs *regs)
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index d8242295ef..9e8f3e65b1 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1426,9 +1426,9 @@ static void tsc_check_slave(void *unused)
 static void tsc_check_reliability(void)
 {
     unsigned int cpu = smp_processor_id();
-    static DEFINE_SPINLOCK(lock);
+    static DEFINE_SPINLOCK(tsc_check_lock);
 
-    spin_lock(&lock);
+    spin_lock(&tsc_check_lock);
 
     tsc_check_count++;
     smp_call_function(tsc_check_slave, NULL, 0);
@@ -1439,7 +1439,7 @@ static void tsc_check_reliability(void)
     while ( !cpumask_empty(&tsc_check_cpumask) )
         cpu_relax();
 
-    spin_unlock(&lock);
+    spin_unlock(&tsc_check_lock);
 }
 
 /*
diff --git a/xen/common/keyhandler.c b/xen/common/keyhandler.c
index c36baa4dff..6e4ff890db 100644
--- a/xen/common/keyhandler.c
+++ b/xen/common/keyhandler.c
@@ -371,9 +371,9 @@ static void read_clocks(unsigned char key)
     static u64 sumdif_stime = 0, maxdif_stime = 0;
     static u64 sumdif_cycles = 0, maxdif_cycles = 0;
     static u32 count = 0;
-    static DEFINE_SPINLOCK(lock);
+    static DEFINE_SPINLOCK(read_clocks_lock);
 
-    spin_lock(&lock);
+    spin_lock(&read_clocks_lock);
 
     smp_call_function(read_clocks_slave, NULL, 0);
 
@@ -408,7 +408,7 @@ static void read_clocks(unsigned char key)
     min_cycles = per_cpu(read_cycles_time, min_cycles_cpu);
     max_cycles = per_cpu(read_cycles_time, max_cycles_cpu);
 
-    spin_unlock(&lock);
+    spin_unlock(&read_clocks_lock);
 
     dif_stime = max_stime - min_stime;
     if ( dif_stime > maxdif_stime )
diff --git a/xen/common/perfc.c b/xen/common/perfc.c
index 3abe35892a..8a099fb0be 100644
--- a/xen/common/perfc.c
+++ b/xen/common/perfc.c
@@ -241,10 +241,10 @@ static int perfc_copy_info(XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc,
 /* Dom0 control of perf counters */
 int perfc_control(struct xen_sysctl_perfc_op *pc)
 {
-    static DEFINE_SPINLOCK(lock);
+    static DEFINE_SPINLOCK(perfc_control_lock);
     int rc;
 
-    spin_lock(&lock);
+    spin_lock(&perfc_control_lock);
 
     switch ( pc->cmd )
     {
@@ -262,7 +262,7 @@ int perfc_control(struct xen_sysctl_perfc_op *pc)
         break;
     }
 
-    spin_unlock(&lock);
+    spin_unlock(&perfc_control_lock);
 
     pc->nr_counters = NR_PERFCTRS;
     pc->nr_vals = perfc_nbr_vals;
diff --git a/xen/common/trace.c b/xen/common/trace.c
index d1ef81407b..a216bea00c 100644
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -368,10 +368,10 @@ void __init init_trace_bufs(void)
  */
 int tb_control(struct xen_sysctl_tbuf_op *tbc)
 {
-    static DEFINE_SPINLOCK(lock);
+    static DEFINE_SPINLOCK(tb_control_lock);
     int rc = 0;
 
-    spin_lock(&lock);
+    spin_lock(&tb_control_lock);
 
     switch ( tbc->cmd )
     {
@@ -432,7 +432,7 @@ int tb_control(struct xen_sysctl_tbuf_op *tbc)
         break;
     }
 
-    spin_unlock(&lock);
+    spin_unlock(&tb_control_lock);
 
     return rc;
 }
diff --git a/xen/drivers/char/console.c b/xen/drivers/char/console.c
index e133534be7..e713eeff8e 100644
--- a/xen/drivers/char/console.c
+++ b/xen/drivers/char/console.c
@@ -1168,7 +1168,7 @@ void panic(const char *fmt, ...)
 {
     va_list args;
     unsigned long flags;
-    static DEFINE_SPINLOCK(lock);
+    static DEFINE_SPINLOCK(panic_lock);
     static char buf[128];
 
     spin_debug_disable();
@@ -1176,7 +1176,7 @@ void panic(const char *fmt, ...)
     debugtrace_dump();
 
     /* Protects buf[] and ensure multi-line message prints atomically. */
-    spin_lock_irqsave(&lock, flags);
+    spin_lock_irqsave(&panic_lock, flags);
 
     va_start(args, fmt);
     (void)vsnprintf(buf, sizeof(buf), fmt, args);
@@ -1196,7 +1196,7 @@ void panic(const char *fmt, ...)
     printk("Reboot in five seconds...\n");
 #endif
 
-    spin_unlock_irqrestore(&lock, flags);
+    spin_unlock_irqrestore(&panic_lock, flags);
 
     debugger_trap_immediate()