
coresight: etm3x: convert struct etm_drvdata's spinlock to raw_spinlock

Message ID 20230711070536.3944458-1-quanyang.wang@windriver.com (mailing list archive)
State New, archived
Series coresight: etm3x: convert struct etm_drvdata's spinlock to raw_spinlock

Commit Message

Quanyang Wang July 11, 2023, 7:05 a.m. UTC
From: Quanyang Wang <quanyang.wang@windriver.com>

On a PREEMPT_RT kernel, spinlock_t locks become sleepable. The functions
etm_dying_cpu and etm_starting_cpu, which call spin_lock/spin_unlock, run in
an IRQ-disabled context, which triggers the following calltrace:

    BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:46
    in_atomic(): 1, irqs_disabled(): 128, non_block: 0, pid: 25, name: migration/1
    preempt_count: 1, expected: 0
    RCU nest depth: 0, expected: 0
    1 lock held by migration/1/25:
     #0: 82a7587c (&drvdata->spinlock){....}-{2:2}, at: etm_dying_cpu+0x28/0x54
    Preemption disabled at:
    [<801ec760>] cpu_stopper_thread+0x94/0x120
    CPU: 1 PID: 25 Comm: migration/1 Not tainted 6.1.35-rt10-yocto-preempt-rt #30
    Hardware name: Xilinx Zynq Platform
    Stopper: multi_cpu_stop+0x0/0x174 <- __stop_cpus.constprop.0+0x48/0x88
     unwind_backtrace from show_stack+0x18/0x1c
     show_stack from dump_stack_lvl+0x58/0x70
     dump_stack_lvl from __might_resched+0x14c/0x1c0
     __might_resched from rt_spin_lock+0x4c/0x84
     rt_spin_lock from etm_dying_cpu+0x28/0x54
     etm_dying_cpu from cpuhp_invoke_callback+0x140/0x33c
     cpuhp_invoke_callback from __cpuhp_invoke_callback_range+0xa4/0x104
     __cpuhp_invoke_callback_range from take_cpu_down+0x7c/0xa8
     take_cpu_down from multi_cpu_stop+0x15c/0x174
     multi_cpu_stop from cpu_stopper_thread+0x9c/0x120
     cpu_stopper_thread from smpboot_thread_fn+0x31c/0x360
     smpboot_thread_fn from kthread+0x100/0x124
     kthread from ret_from_fork+0x14/0x2c

Convert struct etm_drvdata's spinlock to raw_spinlock to fix it.

Fixes: 97fe626ce64c ("coresight: etm3x: Allow etm3x to be built as a module")
Cc: stable@vger.kernel.org
Signed-off-by: Quanyang Wang <quanyang.wang@windriver.com>
---
 drivers/hwtracing/coresight/coresight-etm.h   |   2 +-
 .../coresight/coresight-etm3x-core.c          |  18 +--
 .../coresight/coresight-etm3x-sysfs.c         | 130 +++++++++---------
 3 files changed, 75 insertions(+), 75 deletions(-)
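
On a PREEMPT_RT kernel a spinlock_t is backed by an rt_mutex and may sleep, while a raw_spinlock_t keeps spinning and therefore remains legal in the IRQ-disabled CPU-hotplug callbacks quoted above. The sketch below shows that conversion pattern in isolation; the struct and function names (example_drvdata, example_dying_cpu_cb) are simplified stand-ins, not the driver's real code.

#include <linux/types.h>
#include <linux/spinlock.h>

struct example_drvdata {
        raw_spinlock_t  lock;           /* was: spinlock_t lock; */
        bool            enabled;
};

static void example_init(struct example_drvdata *d)
{
        raw_spin_lock_init(&d->lock);   /* was: spin_lock_init() */
}

/* Runs via the CPU-hotplug state machine, with interrupts disabled. */
static int example_dying_cpu_cb(struct example_drvdata *d)
{
        raw_spin_lock(&d->lock);        /* was: spin_lock() */
        d->enabled = false;             /* e.g. stop tracing for this CPU */
        raw_spin_unlock(&d->lock);      /* was: spin_unlock() */
        return 0;
}

The full patch below applies the same substitution to every spin_lock()/spin_unlock() call on drvdata->spinlock.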

Comments

Greg KH July 11, 2023, 2:05 p.m. UTC | #1
On Tue, Jul 11, 2023 at 03:05:36PM +0800, quanyang.wang@windriver.com wrote:
> From: Quanyang Wang <quanyang.wang@windriver.com>
> 
> On a PREEMPT_RT kernel, spinlock_t locks become sleepable. The functions
> etm_dying_cpu and etm_starting_cpu, which call spin_lock/spin_unlock, run in
> an IRQ-disabled context, which triggers the following calltrace:
> 
>     BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:46
>     in_atomic(): 1, irqs_disabled(): 128, non_block: 0, pid: 25, name: migration/1
>     preempt_count: 1, expected: 0
>     RCU nest depth: 0, expected: 0
>     1 lock held by migration/1/25:
>      #0: 82a7587c (&drvdata->spinlock){....}-{2:2}, at: etm_dying_cpu+0x28/0x54
>     Preemption disabled at:
>     [<801ec760>] cpu_stopper_thread+0x94/0x120
>     CPU: 1 PID: 25 Comm: migration/1 Not tainted 6.1.35-rt10-yocto-preempt-rt #30
>     Hardware name: Xilinx Zynq Platform
>     Stopper: multi_cpu_stop+0x0/0x174 <- __stop_cpus.constprop.0+0x48/0x88
>      unwind_backtrace from show_stack+0x18/0x1c
>      show_stack from dump_stack_lvl+0x58/0x70
>      dump_stack_lvl from __might_resched+0x14c/0x1c0
>      __might_resched from rt_spin_lock+0x4c/0x84
>      rt_spin_lock from etm_dying_cpu+0x28/0x54
>      etm_dying_cpu from cpuhp_invoke_callback+0x140/0x33c
>      cpuhp_invoke_callback from __cpuhp_invoke_callback_range+0xa4/0x104
>      __cpuhp_invoke_callback_range from take_cpu_down+0x7c/0xa8
>      take_cpu_down from multi_cpu_stop+0x15c/0x174
>      multi_cpu_stop from cpu_stopper_thread+0x9c/0x120
>      cpu_stopper_thread from smpboot_thread_fn+0x31c/0x360
>      smpboot_thread_fn from kthread+0x100/0x124
>      kthread from ret_from_fork+0x14/0x2c
> 
> Convert struct etm_drvdata's spinlock to raw_spinlock to fix it.

wait, why will a raw_spinlock fix this?  Why not fix the root problem
here, that of taking these locks improperly in irq context?

How is changing to a raw_spinlock going to fix the above splat?

thanks,

greg k-h
James Clark July 11, 2023, 3:45 p.m. UTC | #2
On 11/07/2023 15:05, Greg Kroah-Hartman wrote:
> On Tue, Jul 11, 2023 at 03:05:36PM +0800, quanyang.wang@windriver.com wrote:
>> From: Quanyang Wang <quanyang.wang@windriver.com>
>>
>> On a PREEMPT_RT kernel, spinlock_t locks become sleepable. The functions
>> etm_dying_cpu and etm_starting_cpu, which call spin_lock/spin_unlock, run in
>> an IRQ-disabled context, which triggers the following calltrace:
>>
>>     BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:46
>>     in_atomic(): 1, irqs_disabled(): 128, non_block: 0, pid: 25, name: migration/1
>>     preempt_count: 1, expected: 0
>>     RCU nest depth: 0, expected: 0
>>     1 lock held by migration/1/25:
>>      #0: 82a7587c (&drvdata->spinlock){....}-{2:2}, at: etm_dying_cpu+0x28/0x54
>>     Preemption disabled at:
>>     [<801ec760>] cpu_stopper_thread+0x94/0x120
>>     CPU: 1 PID: 25 Comm: migration/1 Not tainted 6.1.35-rt10-yocto-preempt-rt #30
>>     Hardware name: Xilinx Zynq Platform
>>     Stopper: multi_cpu_stop+0x0/0x174 <- __stop_cpus.constprop.0+0x48/0x88
>>      unwind_backtrace from show_stack+0x18/0x1c
>>      show_stack from dump_stack_lvl+0x58/0x70
>>      dump_stack_lvl from __might_resched+0x14c/0x1c0
>>      __might_resched from rt_spin_lock+0x4c/0x84
>>      rt_spin_lock from etm_dying_cpu+0x28/0x54
>>      etm_dying_cpu from cpuhp_invoke_callback+0x140/0x33c
>>      cpuhp_invoke_callback from __cpuhp_invoke_callback_range+0xa4/0x104
>>      __cpuhp_invoke_callback_range from take_cpu_down+0x7c/0xa8
>>      take_cpu_down from multi_cpu_stop+0x15c/0x174
>>      multi_cpu_stop from cpu_stopper_thread+0x9c/0x120
>>      cpu_stopper_thread from smpboot_thread_fn+0x31c/0x360
>>      smpboot_thread_fn from kthread+0x100/0x124
>>      kthread from ret_from_fork+0x14/0x2c
>>
>> Convert struct etm_drvdata's spinlock to raw_spinlock to fix it.
> 
> wait, why will a raw_spinlock fix this?  Why not fix the root problem
> here, that of taking these locks improperly in irq context?
> 
> How is changing to a raw_spinlock going to fix the above splat?
> 
> thanks,
> 
> greg k-h
> 

If it's just etm_starting_cpu() and etm_dying_cpu() as mentioned in the
commit message then can those spinlocks be removed?

Surely there can't be any concurrent access to the per-cpu data when the
hotplug callbacks are called?

James

Suzuki K Poulose July 12, 2023, 1:22 p.m. UTC | #3
On 11/07/2023 16:45, James Clark wrote:
> 
> 
> On 11/07/2023 15:05, Greg Kroah-Hartman wrote:
>> On Tue, Jul 11, 2023 at 03:05:36PM +0800, quanyang.wang@windriver.com wrote:
>>> From: Quanyang Wang <quanyang.wang@windriver.com>
>>>
>>> On a PREEMPT_RT kernel, spinlock_t locks become sleepable. The functions
>>> etm_dying_cpu and etm_starting_cpu, which call spin_lock/spin_unlock, run in
>>> an IRQ-disabled context, which triggers the following calltrace:
>>>
>>>      BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:46
>>>      in_atomic(): 1, irqs_disabled(): 128, non_block: 0, pid: 25, name: migration/1
>>>      preempt_count: 1, expected: 0
>>>      RCU nest depth: 0, expected: 0
>>>      1 lock held by migration/1/25:
>>>       #0: 82a7587c (&drvdata->spinlock){....}-{2:2}, at: etm_dying_cpu+0x28/0x54
>>>      Preemption disabled at:
>>>      [<801ec760>] cpu_stopper_thread+0x94/0x120
>>>      CPU: 1 PID: 25 Comm: migration/1 Not tainted 6.1.35-rt10-yocto-preempt-rt #30
>>>      Hardware name: Xilinx Zynq Platform
>>>      Stopper: multi_cpu_stop+0x0/0x174 <- __stop_cpus.constprop.0+0x48/0x88
>>>       unwind_backtrace from show_stack+0x18/0x1c
>>>       show_stack from dump_stack_lvl+0x58/0x70
>>>       dump_stack_lvl from __might_resched+0x14c/0x1c0
>>>       __might_resched from rt_spin_lock+0x4c/0x84
>>>       rt_spin_lock from etm_dying_cpu+0x28/0x54
>>>       etm_dying_cpu from cpuhp_invoke_callback+0x140/0x33c
>>>       cpuhp_invoke_callback from __cpuhp_invoke_callback_range+0xa4/0x104
>>>       __cpuhp_invoke_callback_range from take_cpu_down+0x7c/0xa8
>>>       take_cpu_down from multi_cpu_stop+0x15c/0x174
>>>       multi_cpu_stop from cpu_stopper_thread+0x9c/0x120
>>>       cpu_stopper_thread from smpboot_thread_fn+0x31c/0x360
>>>       smpboot_thread_fn from kthread+0x100/0x124
>>>       kthread from ret_from_fork+0x14/0x2c
>>>
>>> Convert struct etm_drvdata's spinlock to raw_spinlock to fix it.
>>
>> wait, why will a raw_spinlock fix this?  Why not fix the root problem
>> here, that of taking these locks improperly in irq context?
>>
>> How is changing to a raw_spinlock going to fix the above splat?
>>
>> thanks,
>>
>> greg k-h
>>
> 
> If it's just etm_starting_cpu() and etm_dying_cpu() as mentioned in the
> commit message then can those spinlocks be removed?
> 
> Surely there can't be any concurrent access to the per-cpu data when the
> hotplug callbacks are called?

Accessing the per-CPU data is not a problem. The spinlocks are there to
protect the accesses to drvdata->mode. etm_starting_cpu() would try to
enable the ETM (i.e., start the tracing) if the mode is not DISABLED.
Especially in SYSFS mode, this could be controlled from a different
CPU, affecting the mode. I think we may still be able to avoid this
lock, by only allowing modifications to the mode to be performed via
enable_hw/disable_hw on the CPU itself. That way, there cannot be
concurrent modifications to the mode for a given ETM bound to that CPU.

Suzuki
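
The lock-avoidance idea sketched above can be pictured as follows: route the sysfs-driven mode changes through smp_call_function_single() so that they execute on the CPU the ETM is bound to, where the hotplug callbacks already run with interrupts disabled. The names below (example_*) are illustrative only, and this is a rough sketch of the idea under that assumption, not a proposed patch.

#include <linux/smp.h>
#include <asm/local.h>

struct example_drvdata {
        int     cpu;            /* CPU this ETM is bound to */
        local_t mode;
};

/* Runs on d->cpu via the cross-call below: no lock needed for ->mode. */
static void example_enable_hw(void *info)
{
        struct example_drvdata *d = info;

        local_set(&d->mode, 1);
        /* ... program the ETM registers for this CPU ... */
}

static int example_enable_sysfs(struct example_drvdata *d)
{
        /* Force the mode update onto the owning CPU. */
        return smp_call_function_single(d->cpu, example_enable_hw, d, 1);
}

etm_disable_sysfs() in the patch below already uses this cross-call pattern for the disable path.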
Sebastian Andrzej Siewior Aug. 7, 2023, 1:40 p.m. UTC | #4
On 2023-07-11 15:05:36 [+0800], quanyang.wang@windriver.com wrote:
> diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
> index 116a91d90ac2..af34fb82f4bb 100644
> --- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
> +++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
> @@ -518,7 +518,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
>  	struct etm_enable_arg arg = { };
>  	int ret;
>  
> -	spin_lock(&drvdata->spinlock);
> +	raw_spin_lock(&drvdata->spinlock);
>  
>  	/* sysfs needs to allocate and set a trace ID */
>  	ret = etm_read_alloc_trace_id(drvdata);

This is not going to work because etm_read_alloc_trace_id() acquires,
later in the call chain, id_map_lock, which is a spinlock_t.
This should also lead to a splat like the one you are complaining about.

Sebastian
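
The nesting problem can be reproduced in a few lines: on PREEMPT_RT a spinlock_t may sleep, so acquiring one while a raw_spinlock_t is held triggers the same "sleeping function called from invalid context" splat, only now from the enable path instead of the hotplug path. The lock and function names below are illustrative stand-ins, not the real trace-ID allocation code.

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(drv_lock);           /* the converted lock */
static DEFINE_SPINLOCK(shared_id_map_lock);     /* stand-in for the spinlock_t
                                                 * taken deeper in the call chain */

static int allocate_id_locked(void)
{
        int id;

        spin_lock(&shared_id_map_lock);         /* may sleep on PREEMPT_RT ... */
        id = 1;                                 /* ... while drv_lock is held */
        spin_unlock(&shared_id_map_lock);
        return id;
}

static int enable_path(void)
{
        int id;

        raw_spin_lock(&drv_lock);               /* atomic context from here on */
        id = allocate_id_locked();              /* splats on PREEMPT_RT */
        raw_spin_unlock(&drv_lock);
        return id;
}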

Patch

diff --git a/drivers/hwtracing/coresight/coresight-etm.h b/drivers/hwtracing/coresight/coresight-etm.h
index 9a0d08b092ae..55e1a9180557 100644
--- a/drivers/hwtracing/coresight/coresight-etm.h
+++ b/drivers/hwtracing/coresight/coresight-etm.h
@@ -233,7 +233,7 @@  struct etm_drvdata {
 	void __iomem			*base;
 	struct clk			*atclk;
 	struct coresight_device		*csdev;
-	spinlock_t			spinlock;
+	raw_spinlock_t			spinlock;
 	int				cpu;
 	int				port_size;
 	u8				arch;
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-core.c b/drivers/hwtracing/coresight/coresight-etm3x-core.c
index 116a91d90ac2..af34fb82f4bb 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-core.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-core.c
@@ -518,7 +518,7 @@  static int etm_enable_sysfs(struct coresight_device *csdev)
 	struct etm_enable_arg arg = { };
 	int ret;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 
 	/* sysfs needs to allocate and set a trace ID */
 	ret = etm_read_alloc_trace_id(drvdata);
@@ -545,7 +545,7 @@  static int etm_enable_sysfs(struct coresight_device *csdev)
 		etm_release_trace_id(drvdata);
 
 unlock_enable_sysfs:
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	if (!ret)
 		dev_dbg(&csdev->dev, "ETM tracing enabled\n");
@@ -647,7 +647,7 @@  static void etm_disable_sysfs(struct coresight_device *csdev)
 	 * DYING hotplug callback is serviced by the ETM driver.
 	 */
 	cpus_read_lock();
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 
 	/*
 	 * Executing etm_disable_hw on the cpu whose ETM is being disabled
@@ -655,7 +655,7 @@  static void etm_disable_sysfs(struct coresight_device *csdev)
 	 */
 	smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
 
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 	cpus_read_unlock();
 
 	/*
@@ -724,7 +724,7 @@  static int etm_starting_cpu(unsigned int cpu)
 	if (!etmdrvdata[cpu])
 		return 0;
 
-	spin_lock(&etmdrvdata[cpu]->spinlock);
+	raw_spin_lock(&etmdrvdata[cpu]->spinlock);
 	if (!etmdrvdata[cpu]->os_unlock) {
 		etm_os_unlock(etmdrvdata[cpu]);
 		etmdrvdata[cpu]->os_unlock = true;
@@ -732,7 +732,7 @@  static int etm_starting_cpu(unsigned int cpu)
 
 	if (local_read(&etmdrvdata[cpu]->mode))
 		etm_enable_hw(etmdrvdata[cpu]);
-	spin_unlock(&etmdrvdata[cpu]->spinlock);
+	raw_spin_unlock(&etmdrvdata[cpu]->spinlock);
 	return 0;
 }
 
@@ -741,10 +741,10 @@  static int etm_dying_cpu(unsigned int cpu)
 	if (!etmdrvdata[cpu])
 		return 0;
 
-	spin_lock(&etmdrvdata[cpu]->spinlock);
+	raw_spin_lock(&etmdrvdata[cpu]->spinlock);
 	if (local_read(&etmdrvdata[cpu]->mode))
 		etm_disable_hw(etmdrvdata[cpu]);
-	spin_unlock(&etmdrvdata[cpu]->spinlock);
+	raw_spin_unlock(&etmdrvdata[cpu]->spinlock);
 	return 0;
 }
 
@@ -871,7 +871,7 @@  static int etm_probe(struct amba_device *adev, const struct amba_id *id)
 	drvdata->base = base;
 	desc.access = CSDEV_ACCESS_IOMEM(base);
 
-	spin_lock_init(&drvdata->spinlock);
+	raw_spin_lock_init(&drvdata->spinlock);
 
 	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
 	if (!IS_ERR(drvdata->atclk)) {
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index 2f271b7fb048..64660b47f068 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -49,13 +49,13 @@  static ssize_t etmsr_show(struct device *dev,
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 
 	pm_runtime_get_sync(dev->parent);
-	spin_lock_irqsave(&drvdata->spinlock, flags);
+	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
 	CS_UNLOCK(drvdata->base);
 
 	val = etm_readl(drvdata, ETMSR);
 
 	CS_LOCK(drvdata->base);
-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
 	pm_runtime_put(dev->parent);
 
 	return sprintf(buf, "%#lx\n", val);
@@ -76,7 +76,7 @@  static ssize_t reset_store(struct device *dev,
 		return ret;
 
 	if (val) {
-		spin_lock(&drvdata->spinlock);
+		raw_spin_lock(&drvdata->spinlock);
 		memset(config, 0, sizeof(struct etm_config));
 		config->mode = ETM_MODE_EXCLUDE;
 		config->trigger_event = ETM_DEFAULT_EVENT_VAL;
@@ -86,7 +86,7 @@  static ssize_t reset_store(struct device *dev,
 
 		etm_set_default(config);
 		etm_release_trace_id(drvdata);
-		spin_unlock(&drvdata->spinlock);
+		raw_spin_unlock(&drvdata->spinlock);
 	}
 
 	return size;
@@ -117,7 +117,7 @@  static ssize_t mode_store(struct device *dev,
 	if (ret)
 		return ret;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	config->mode = val & ETM_MODE_ALL;
 
 	if (config->mode & ETM_MODE_EXCLUDE)
@@ -168,12 +168,12 @@  static ssize_t mode_store(struct device *dev,
 	if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
 		etm_config_trace_mode(config);
 
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 
 err_unlock:
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 	return ret;
 }
 static DEVICE_ATTR_RW(mode);
@@ -299,9 +299,9 @@  static ssize_t addr_idx_store(struct device *dev,
 	 * Use spinlock to ensure index doesn't change while it gets
 	 * dereferenced multiple times within a spinlock block elsewhere.
 	 */
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	config->addr_idx = val;
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 }
@@ -315,16 +315,16 @@  static ssize_t addr_single_show(struct device *dev,
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 	struct etm_config *config = &drvdata->config;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	idx = config->addr_idx;
 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-		spin_unlock(&drvdata->spinlock);
+		raw_spin_unlock(&drvdata->spinlock);
 		return -EINVAL;
 	}
 
 	val = config->addr_val[idx];
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return sprintf(buf, "%#lx\n", val);
 }
@@ -343,17 +343,17 @@  static ssize_t addr_single_store(struct device *dev,
 	if (ret)
 		return ret;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	idx = config->addr_idx;
 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 	      config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
-		spin_unlock(&drvdata->spinlock);
+		raw_spin_unlock(&drvdata->spinlock);
 		return -EINVAL;
 	}
 
 	config->addr_val[idx] = val;
 	config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 }
@@ -367,23 +367,23 @@  static ssize_t addr_range_show(struct device *dev,
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 	struct etm_config *config = &drvdata->config;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	idx = config->addr_idx;
 	if (idx % 2 != 0) {
-		spin_unlock(&drvdata->spinlock);
+		raw_spin_unlock(&drvdata->spinlock);
 		return -EPERM;
 	}
 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-		spin_unlock(&drvdata->spinlock);
+		raw_spin_unlock(&drvdata->spinlock);
 		return -EPERM;
 	}
 
 	val1 = config->addr_val[idx];
 	val2 = config->addr_val[idx + 1];
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return sprintf(buf, "%#lx %#lx\n", val1, val2);
 }
@@ -403,17 +403,17 @@  static ssize_t addr_range_store(struct device *dev,
 	if (val1 > val2)
 		return -EINVAL;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	idx = config->addr_idx;
 	if (idx % 2 != 0) {
-		spin_unlock(&drvdata->spinlock);
+		raw_spin_unlock(&drvdata->spinlock);
 		return -EPERM;
 	}
 	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
 	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
 	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
-		spin_unlock(&drvdata->spinlock);
+		raw_spin_unlock(&drvdata->spinlock);
 		return -EPERM;
 	}
 
@@ -422,7 +422,7 @@  static ssize_t addr_range_store(struct device *dev,
 	config->addr_val[idx + 1] = val2;
 	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
 	config->enable_ctrl1 |= (1 << (idx/2));
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 }
@@ -436,16 +436,16 @@  static ssize_t addr_start_show(struct device *dev,
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 	struct etm_config *config = &drvdata->config;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	idx = config->addr_idx;
 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
-		spin_unlock(&drvdata->spinlock);
+		raw_spin_unlock(&drvdata->spinlock);
 		return -EPERM;
 	}
 
 	val = config->addr_val[idx];
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return sprintf(buf, "%#lx\n", val);
 }
@@ -464,11 +464,11 @@  static ssize_t addr_start_store(struct device *dev,
 	if (ret)
 		return ret;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	idx = config->addr_idx;
 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
-		spin_unlock(&drvdata->spinlock);
+		raw_spin_unlock(&drvdata->spinlock);
 		return -EPERM;
 	}
 
@@ -476,7 +476,7 @@  static ssize_t addr_start_store(struct device *dev,
 	config->addr_type[idx] = ETM_ADDR_TYPE_START;
 	config->startstop_ctrl |= (1 << idx);
 	config->enable_ctrl1 |= ETMTECR1_START_STOP;
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 }
@@ -490,16 +490,16 @@  static ssize_t addr_stop_show(struct device *dev,
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 	struct etm_config *config = &drvdata->config;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	idx = config->addr_idx;
 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-		spin_unlock(&drvdata->spinlock);
+		raw_spin_unlock(&drvdata->spinlock);
 		return -EPERM;
 	}
 
 	val = config->addr_val[idx];
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return sprintf(buf, "%#lx\n", val);
 }
@@ -518,11 +518,11 @@  static ssize_t addr_stop_store(struct device *dev,
 	if (ret)
 		return ret;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	idx = config->addr_idx;
 	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
 	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
-		spin_unlock(&drvdata->spinlock);
+		raw_spin_unlock(&drvdata->spinlock);
 		return -EPERM;
 	}
 
@@ -530,7 +530,7 @@  static ssize_t addr_stop_store(struct device *dev,
 	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
 	config->startstop_ctrl |= (1 << (idx + 16));
 	config->enable_ctrl1 |= ETMTECR1_START_STOP;
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 }
@@ -543,9 +543,9 @@  static ssize_t addr_acctype_show(struct device *dev,
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 	struct etm_config *config = &drvdata->config;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	val = config->addr_acctype[config->addr_idx];
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return sprintf(buf, "%#lx\n", val);
 }
@@ -563,9 +563,9 @@  static ssize_t addr_acctype_store(struct device *dev,
 	if (ret)
 		return ret;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	config->addr_acctype[config->addr_idx] = val;
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 }
@@ -601,9 +601,9 @@  static ssize_t cntr_idx_store(struct device *dev,
 	 * Use spinlock to ensure index doesn't change while it gets
 	 * dereferenced multiple times within a spinlock block elsewhere.
 	 */
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	config->cntr_idx = val;
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 }
@@ -616,9 +616,9 @@  static ssize_t cntr_rld_val_show(struct device *dev,
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 	struct etm_config *config = &drvdata->config;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	val = config->cntr_rld_val[config->cntr_idx];
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return sprintf(buf, "%#lx\n", val);
 }
@@ -636,9 +636,9 @@  static ssize_t cntr_rld_val_store(struct device *dev,
 	if (ret)
 		return ret;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	config->cntr_rld_val[config->cntr_idx] = val;
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 }
@@ -651,9 +651,9 @@  static ssize_t cntr_event_show(struct device *dev,
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 	struct etm_config *config = &drvdata->config;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	val = config->cntr_event[config->cntr_idx];
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return sprintf(buf, "%#lx\n", val);
 }
@@ -671,9 +671,9 @@  static ssize_t cntr_event_store(struct device *dev,
 	if (ret)
 		return ret;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	config->cntr_event[config->cntr_idx] = val & ETM_EVENT_MASK;
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 }
@@ -686,9 +686,9 @@  static ssize_t cntr_rld_event_show(struct device *dev,
 	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
 	struct etm_config *config = &drvdata->config;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	val = config->cntr_rld_event[config->cntr_idx];
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return sprintf(buf, "%#lx\n", val);
 }
@@ -706,9 +706,9 @@  static ssize_t cntr_rld_event_store(struct device *dev,
 	if (ret)
 		return ret;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	config->cntr_rld_event[config->cntr_idx] = val & ETM_EVENT_MASK;
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 }
@@ -723,11 +723,11 @@  static ssize_t cntr_val_show(struct device *dev,
 	struct etm_config *config = &drvdata->config;
 
 	if (!local_read(&drvdata->mode)) {
-		spin_lock(&drvdata->spinlock);
+		raw_spin_lock(&drvdata->spinlock);
 		for (i = 0; i < drvdata->nr_cntr; i++)
 			ret += sprintf(buf, "counter %d: %x\n",
 				       i, config->cntr_val[i]);
-		spin_unlock(&drvdata->spinlock);
+		raw_spin_unlock(&drvdata->spinlock);
 		return ret;
 	}
 
@@ -752,9 +752,9 @@  static ssize_t cntr_val_store(struct device *dev,
 	if (ret)
 		return ret;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	config->cntr_val[config->cntr_idx] = val;
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 }
@@ -947,13 +947,13 @@  static ssize_t seq_curr_state_show(struct device *dev,
 	}
 
 	pm_runtime_get_sync(dev->parent);
-	spin_lock_irqsave(&drvdata->spinlock, flags);
+	raw_spin_lock_irqsave(&drvdata->spinlock, flags);
 
 	CS_UNLOCK(drvdata->base);
 	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
 	CS_LOCK(drvdata->base);
 
-	spin_unlock_irqrestore(&drvdata->spinlock, flags);
+	raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
 	pm_runtime_put(dev->parent);
 out:
 	return sprintf(buf, "%#lx\n", val);
@@ -1012,9 +1012,9 @@  static ssize_t ctxid_idx_store(struct device *dev,
 	 * Use spinlock to ensure index doesn't change while it gets
 	 * dereferenced multiple times within a spinlock block elsewhere.
 	 */
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	config->ctxid_idx = val;
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 }
@@ -1034,9 +1034,9 @@  static ssize_t ctxid_pid_show(struct device *dev,
 	if (task_active_pid_ns(current) != &init_pid_ns)
 		return -EINVAL;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	val = config->ctxid_pid[config->ctxid_idx];
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return sprintf(buf, "%#lx\n", val);
 }
@@ -1066,9 +1066,9 @@  static ssize_t ctxid_pid_store(struct device *dev,
 	if (ret)
 		return ret;
 
-	spin_lock(&drvdata->spinlock);
+	raw_spin_lock(&drvdata->spinlock);
 	config->ctxid_pid[config->ctxid_idx] = pid;
-	spin_unlock(&drvdata->spinlock);
+	raw_spin_unlock(&drvdata->spinlock);
 
 	return size;
 }