From patchwork Wed May 18 16:44:16 2022
X-Patchwork-Submitter: Sai Prakash Ranjan
X-Patchwork-Id: 12853874
From: Sai Prakash Ranjan <quic_saipraka@quicinc.com>
Subject: [PATCHv15 7/9] asm-generic/io: Add logging support for MMIO accessors
Date: Wed, 18 May 2022 22:14:16 +0530
Message-ID: <26d896a5cd2ca84416d77d6621715d595a7462db.1652891705.git.quic_saipraka@quicinc.com>
X-Mailing-List: linux-arm-msm@vger.kernel.org

Add logging support for MMIO high level accessors such as read{b,w,l,q}
and their relaxed versions to aid in debugging unexpected crashes/hangs
caused by the corresponding MMIO operation.
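For illustration only, using a hypothetical driver file, a driver that wants
to opt out of this tracing defines __DISABLE_TRACE_MMIO__ before any of its
includes, as described in the header comment added below:

/* Hypothetical example: drivers/foo/foo.c opting out of MMIO tracing. */
#define __DISABLE_TRACE_MMIO__	/* must appear before the <linux/io.h> include */

#include <linux/io.h>
#include <linux/types.h>

static u32 foo_read_status(void __iomem *base)
{
	/* With the flag defined, this readl() uses the no-op logging stubs. */
	return readl(base + 0x04);	/* 0x04 is a made-up register offset */
}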
Signed-off-by: Sai Prakash Ranjan <quic_saipraka@quicinc.com>
---
 include/asm-generic/io.h | 91 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 87 insertions(+), 4 deletions(-)

diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 7ce93aaf69f8..9c5114335a2d 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -10,6 +10,7 @@
 #include <asm/page.h> /* I/O is all done through memory accesses */
 #include <linux/string.h> /* for memset() and memcpy() */
 #include <linux/types.h>
+#include <linux/instruction_pointer.h>
 
 #ifdef CONFIG_GENERIC_IOMAP
 #include <asm-generic/iomap.h>
@@ -61,6 +62,44 @@
 #define __io_par(v)	__io_ar(v)
 #endif
 
+/*
+ * "__DISABLE_TRACE_MMIO__" flag can be used to disable MMIO tracing for
+ * specific kernel drivers in case of excessive/unwanted logging.
+ *
+ * Usage: Add a #define flag at the beginning of the driver file.
+ * Ex: #define __DISABLE_TRACE_MMIO__
+ *     #include <...>
+ *     ...
+ */
+#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
+#include <linux/tracepoint-defs.h>
+
+DECLARE_TRACEPOINT(rwmmio_write);
+DECLARE_TRACEPOINT(rwmmio_post_write);
+DECLARE_TRACEPOINT(rwmmio_read);
+DECLARE_TRACEPOINT(rwmmio_post_read);
+
+void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+		    unsigned long caller_addr);
+void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+			 unsigned long caller_addr);
+void log_read_mmio(u8 width, const volatile void __iomem *addr,
+		   unsigned long caller_addr);
+void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+			unsigned long caller_addr);
+
+#else
+
+static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+				  unsigned long caller_addr) {}
+static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
+				       unsigned long caller_addr) {}
+static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
+				 unsigned long caller_addr) {}
+static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
+				      unsigned long caller_addr) {}
+
+#endif /* CONFIG_TRACE_MMIO_ACCESS */
+
 /*
  * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
@@ -149,9 +188,11 @@ static inline u8 readb(const volatile void __iomem *addr)
 {
 	u8 val;
 
+	log_read_mmio(8, addr, _THIS_IP_);
 	__io_br();
 	val = __raw_readb(addr);
 	__io_ar(val);
+	log_post_read_mmio(val, 8, addr, _THIS_IP_);
 	return val;
 }
 #endif
@@ -162,9 +203,11 @@ static inline u16 readw(const volatile void __iomem *addr)
 {
 	u16 val;
 
+	log_read_mmio(16, addr, _THIS_IP_);
 	__io_br();
 	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
 	__io_ar(val);
+	log_post_read_mmio(val, 16, addr, _THIS_IP_);
 	return val;
 }
 #endif
@@ -175,9 +218,11 @@ static inline u32 readl(const volatile void __iomem *addr)
 {
 	u32 val;
 
+	log_read_mmio(32, addr, _THIS_IP_);
 	__io_br();
 	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
 	__io_ar(val);
+	log_post_read_mmio(val, 32, addr, _THIS_IP_);
 	return val;
 }
 #endif
@@ -189,9 +234,11 @@ static inline u64 readq(const volatile void __iomem *addr)
 {
 	u64 val;
 
+	log_read_mmio(64, addr, _THIS_IP_);
 	__io_br();
 	val = __le64_to_cpu(__raw_readq(addr));
 	__io_ar(val);
+	log_post_read_mmio(val, 64, addr, _THIS_IP_);
 	return val;
 }
 #endif
@@ -201,9 +248,11 @@ static inline u64 readq(const volatile void __iomem *addr)
 #define writeb writeb
 static inline void writeb(u8 value, volatile void __iomem *addr)
 {
+	log_write_mmio(value, 8, addr, _THIS_IP_);
 	__io_bw();
 	__raw_writeb(value, addr);
 	__io_aw();
+	log_post_write_mmio(value, 8, addr, _THIS_IP_);
 }
 #endif
 
@@ -211,9 +260,11 @@ static inline void writeb(u8 value, volatile void __iomem *addr)
 #define writew writew
 static inline void writew(u16 value, volatile void __iomem *addr)
 {
+	log_write_mmio(value, 16, addr, _THIS_IP_);
 	__io_bw();
 	__raw_writew((u16 __force)cpu_to_le16(value), addr);
 	__io_aw();
+	log_post_write_mmio(value, 16, addr, _THIS_IP_);
 }
 #endif
 
@@ -221,9 +272,11 @@ static inline void writew(u16 value, volatile void __iomem *addr)
 #define writel writel
 static inline void writel(u32 value, volatile void __iomem *addr)
 {
+	log_write_mmio(value, 32, addr, _THIS_IP_);
 	__io_bw();
 	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
 	__io_aw();
+	log_post_write_mmio(value, 32, addr, _THIS_IP_);
 }
 #endif
 
@@ -232,9 +285,11 @@ static inline void writel(u32 value, volatile void __iomem *addr)
 #define writeq writeq
 static inline void writeq(u64 value, volatile void __iomem *addr)
 {
+	log_write_mmio(value, 64, addr, _THIS_IP_);
 	__io_bw();
 	__raw_writeq(__cpu_to_le64(value), addr);
 	__io_aw();
+	log_post_write_mmio(value, 64, addr, _THIS_IP_);
 }
 #endif
 #endif /* CONFIG_64BIT */
@@ -248,7 +303,12 @@ static inline void writeq(u64 value, volatile void __iomem *addr)
 #define readb_relaxed readb_relaxed
 static inline u8 readb_relaxed(const volatile void __iomem *addr)
 {
-	return __raw_readb(addr);
+	u8 val;
+
+	log_read_mmio(8, addr, _THIS_IP_);
+	val = __raw_readb(addr);
+	log_post_read_mmio(val, 8, addr, _THIS_IP_);
+	return val;
 }
 #endif
 
@@ -256,7 +316,12 @@ static inline u8 readb_relaxed(const volatile void __iomem *addr)
 #define readw_relaxed readw_relaxed
 static inline u16 readw_relaxed(const volatile void __iomem *addr)
 {
-	return __le16_to_cpu(__raw_readw(addr));
+	u16 val;
+
+	log_read_mmio(16, addr, _THIS_IP_);
+	val = __le16_to_cpu(__raw_readw(addr));
+	log_post_read_mmio(val, 16, addr, _THIS_IP_);
+	return val;
 }
 #endif
 
@@ -264,7 +329,12 @@ static inline u16 readw_relaxed(const volatile void __iomem *addr)
 #define readl_relaxed readl_relaxed
 static inline u32 readl_relaxed(const volatile void __iomem *addr)
 {
-	return __le32_to_cpu(__raw_readl(addr));
+	u32 val;
+
+	log_read_mmio(32, addr, _THIS_IP_);
+	val = __le32_to_cpu(__raw_readl(addr));
+	log_post_read_mmio(val, 32, addr, _THIS_IP_);
+	return val;
 }
 #endif
 
@@ -272,7 +342,12 @@ static inline u32 readl_relaxed(const volatile void __iomem *addr)
 #define readq_relaxed readq_relaxed
 static inline u64 readq_relaxed(const volatile void __iomem *addr)
 {
-	return __le64_to_cpu(__raw_readq(addr));
+	u64 val;
+
+	log_read_mmio(64, addr, _THIS_IP_);
+	val = __le64_to_cpu(__raw_readq(addr));
+	log_post_read_mmio(val, 64, addr, _THIS_IP_);
+	return val;
 }
 #endif
 
@@ -280,7 +355,9 @@ static inline u64 readq_relaxed(const volatile void __iomem *addr)
 #define writeb_relaxed writeb_relaxed
 static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
 {
+	log_write_mmio(value, 8, addr, _THIS_IP_);
 	__raw_writeb(value, addr);
+	log_post_write_mmio(value, 8, addr, _THIS_IP_);
 }
 #endif
 
@@ -288,7 +365,9 @@ static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
 #define writew_relaxed writew_relaxed
 static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
 {
+	log_write_mmio(value, 16, addr, _THIS_IP_);
 	__raw_writew(cpu_to_le16(value), addr);
+	log_post_write_mmio(value, 16, addr, _THIS_IP_);
 }
 #endif
 
@@ -296,7 +375,9 @@ static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
 #define writel_relaxed writel_relaxed
 static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
 {
+	log_write_mmio(value, 32, addr, _THIS_IP_);
 	__raw_writel(__cpu_to_le32(value), addr);
+	log_post_write_mmio(value, 32, addr, _THIS_IP_);
 }
 #endif
 
@@ -304,7 +385,9 @@ static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
 #define writeq_relaxed writeq_relaxed
 static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
 {
+	log_write_mmio(value, 64, addr, _THIS_IP_);
 	__raw_writeq(__cpu_to_le64(value), addr);
+	log_post_write_mmio(value, 64, addr, _THIS_IP_);
 }
 #endif
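
The log_*_mmio() hooks are only declared in this header; their out-of-line
definitions are added by a separate lib patch in this series. As a rough,
illustrative sketch of the tracing-enabled side (the trace/events/rwmmio.h
location and the event argument order below are assumptions, not taken from
this patch), the read-side pair could reduce to:

/* Sketch only; the authoritative definitions live in the lib patch of this series. */
#define CREATE_TRACE_POINTS
#include <trace/events/rwmmio.h>	/* assumed location of the rwmmio trace events */

void log_read_mmio(u8 width, const volatile void __iomem *addr,
		   unsigned long caller_addr)
{
	/* Fires before the MMIO read is issued. */
	trace_rwmmio_read(caller_addr, width, addr);
}
EXPORT_SYMBOL_GPL(log_read_mmio);

void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
			unsigned long caller_addr)
{
	/* Fires after the read completes, so the value read can be recorded. */
	trace_rwmmio_post_read(caller_addr, val, width, addr);
}
EXPORT_SYMBOL_GPL(log_post_read_mmio);

With CONFIG_TRACE_MMIO_ACCESS enabled, each accessor above is then bracketed
by a pre and post event carrying the caller address captured via _THIS_IP_.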