
[v2,1/2] ALSA: fireworks: accessing to user space outside spinlock

Message ID 1472511695-29316-2-git-send-email-o-takashi@sakamocchi.jp (mailing list archive)
State New, archived

Commit Message

Takashi Sakamoto Aug. 29, 2016, 11:01 p.m. UTC
In the hwdep interface of the fireworks driver, user space is accessed
inside a critical section with local interrupts disabled. Depending on
the architecture, accessing user space can trigger a page fault
exception. The local processor then saves the machine state and handles
the synchronous event, and the handler for this event may call the task
scheduler to wait for the pages to be prepared. On a single-core
processor the situation is even worse, because ordinary hardware
interrupts cannot be handled while local interrupts are disabled.

This commit fixes the bug by performing the user-space access outside
the spinlock.

Reported-by: Vaishali Thakkar <vaishali.thakkar@oracle.com>
Cc: stable@vger.kernel.org
Fixes: 555e8a8f7f14 ("ALSA: fireworks: Add command/response functionality into hwdep interface")
Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
---
 sound/firewire/fireworks/fireworks_hwdep.c | 21 ++++++++++++++++-----
 1 file changed, 16 insertions(+), 5 deletions(-)
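
Condensed sketch of the technique (hypothetical helper, simplified
ring-buffer handling; the real hunks are in the patch at the bottom of
this page). Note that efw->pull_ptr is still dereferenced outside the
lock in this shape, which is what the discussion below is about.

/* Hypothetical, condensed illustration: release the IRQ-disabling
 * spinlock around copy_to_user(), since the copy may fault and sleep,
 * and reacquire it before updating the ring-buffer state.
 */
static long copy_resp_chunk_to_user(struct snd_efw *efw, char __user *buf,
				    unsigned int till_end)
{
	spin_lock_irq(&efw->lock);
	if (efw->resp_queues == 0) {
		spin_unlock_irq(&efw->lock);
		return 0;
	}
	spin_unlock_irq(&efw->lock);

	/* copy_to_user() may fault and sleep, so the lock is not held. */
	if (copy_to_user(buf, efw->pull_ptr, till_end))
		return -EFAULT;

	/* Reacquire the lock before updating shared ring-buffer state. */
	spin_lock_irq(&efw->lock);
	efw->pull_ptr += till_end;	/* simplified: no wrap-around shown */
	efw->resp_queues--;		/* simplified: one response per chunk */
	spin_unlock_irq(&efw->lock);

	return till_end;
}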

Comments

Takashi Iwai Aug. 30, 2016, 5:19 a.m. UTC | #1
On Tue, 30 Aug 2016 01:01:34 +0200,
Takashi Sakamoto wrote:
> 
> In the hwdep interface of the fireworks driver, user space is accessed
> inside a critical section with local interrupts disabled. Depending on
> the architecture, accessing user space can trigger a page fault
> exception. The local processor then saves the machine state and handles
> the synchronous event, and the handler for this event may call the task
> scheduler to wait for the pages to be prepared. On a single-core
> processor the situation is even worse, because ordinary hardware
> interrupts cannot be handled while local interrupts are disabled.
> 
> This commit fixes the bug by performing the user-space access outside
> the spinlock.
> 
> Reported-by: Vaishali Thakkar <vaishali.thakkar@oracle.com>
> Cc: stable@vger.kernel.org
> Fixes: 555e8a8f7f14 ("ALSA: fireworks: Add command/response functionality into hwdep interface")
> Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
> ---
>  sound/firewire/fireworks/fireworks_hwdep.c | 21 ++++++++++++++++-----
>  1 file changed, 16 insertions(+), 5 deletions(-)
> 
> diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
> index 33df865..8ed4b29 100644
> --- a/sound/firewire/fireworks/fireworks_hwdep.c
> +++ b/sound/firewire/fireworks/fireworks_hwdep.c
> @@ -38,6 +38,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
>  	buf += sizeof(type);
>  
>  	/* write into buffer as many responses as possible */
> +	spin_lock_irq(&efw->lock);
>  	while (efw->resp_queues > 0) {
>  		t = (struct snd_efw_transaction *)(efw->pull_ptr);
>  		length = be32_to_cpu(t->length) * sizeof(__be32);
> @@ -52,9 +53,13 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
>  				(unsigned int)(efw->pull_ptr - efw->resp_buf);
>  			till_end = min_t(unsigned int, length, till_end);
>  
> +			spin_unlock_irq(&efw->lock);
> +
>  			if (copy_to_user(buf, efw->pull_ptr, till_end))
>  				return -EFAULT;
>  
> +			spin_lock_irq(&efw->lock);
> +
>  			efw->pull_ptr += till_end;

Strictly speaking, this is still racy.  efw->pull_ptr is accessed
outside the spinlock, and it can be modified while copy_to_user() is
being processed.


Takashi

>  			if (efw->pull_ptr >= efw->resp_buf +
>  					     snd_efw_resp_buf_size)
> @@ -69,6 +74,8 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
>  		efw->resp_queues--;
>  	}
>  
> +	spin_unlock_irq(&efw->lock);
> +
>  	return count;
>  }
>  
> @@ -76,14 +83,17 @@ static long
>  hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
>  		  loff_t *offset)
>  {
> -	union snd_firewire_event event;
> +	union snd_firewire_event event = {
> +		.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
> +	};
>  
> -	memset(&event, 0, sizeof(event));
> +	spin_lock_irq(&efw->lock);
>  
> -	event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
>  	event.lock_status.status = (efw->dev_lock_count > 0);
>  	efw->dev_lock_changed = false;
>  
> +	spin_unlock_irq(&efw->lock);
> +
>  	count = min_t(long, count, sizeof(event.lock_status));
>  
>  	if (copy_to_user(buf, &event, count))
> @@ -111,13 +121,14 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
>  		spin_lock_irq(&efw->lock);
>  	}
>  
> +	/* Loosely release the lock here, but still safe. */
> +	spin_unlock_irq(&efw->lock);
> +
>  	if (efw->dev_lock_changed)
>  		count = hwdep_read_locked(efw, buf, count, offset);
>  	else if (efw->resp_queues > 0)
>  		count = hwdep_read_resp_buf(efw, buf, count, offset);
>  
> -	spin_unlock_irq(&efw->lock);
> -
>  	return count;
>  }
>  
> -- 
> 2.7.4
>
Vaishali Thakkar Aug. 30, 2016, 6:07 a.m. UTC | #2
On Tuesday 30 August 2016 10:49 AM, Takashi Iwai wrote:
> On Tue, 30 Aug 2016 01:01:34 +0200,
> Takashi Sakamoto wrote:
>>
>> In the hwdep interface of the fireworks driver, user space is accessed
>> inside a critical section with local interrupts disabled. Depending on
>> the architecture, accessing user space can trigger a page fault
>> exception. The local processor then saves the machine state and handles
>> the synchronous event, and the handler for this event may call the task
>> scheduler to wait for the pages to be prepared. On a single-core
>> processor the situation is even worse, because ordinary hardware
>> interrupts cannot be handled while local interrupts are disabled.
>>
>> This commit fixes the bug by performing the user-space access outside
>> the spinlock.
>>
>> Reported-by: Vaishali Thakkar <vaishali.thakkar@oracle.com>
>> Cc: stable@vger.kernel.org
>> Fixes: 555e8a8f7f14 ("ALSA: fireworks: Add command/response functionality into hwdep interface")
>> Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
>> ---
>>  sound/firewire/fireworks/fireworks_hwdep.c | 21 ++++++++++++++++-----
>>  1 file changed, 16 insertions(+), 5 deletions(-)
>>
>> diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
>> index 33df865..8ed4b29 100644
>> --- a/sound/firewire/fireworks/fireworks_hwdep.c
>> +++ b/sound/firewire/fireworks/fireworks_hwdep.c
>> @@ -38,6 +38,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
>>  	buf += sizeof(type);
>>  
>>  	/* write into buffer as many responses as possible */
>> +	spin_lock_irq(&efw->lock);
>>  	while (efw->resp_queues > 0) {
>>  		t = (struct snd_efw_transaction *)(efw->pull_ptr);
>>  		length = be32_to_cpu(t->length) * sizeof(__be32);
>> @@ -52,9 +53,13 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
>>  				(unsigned int)(efw->pull_ptr - efw->resp_buf);
>>  			till_end = min_t(unsigned int, length, till_end);
>>  
>> +			spin_unlock_irq(&efw->lock);
>> +
>>  			if (copy_to_user(buf, efw->pull_ptr, till_end))
>>  				return -EFAULT;
>>  
>> +			spin_lock_irq(&efw->lock);
>> +
>>  			efw->pull_ptr += till_end;
> 
> Strictly speaking, this is still racy.  efw->pull_ptr is accessed
> outside the spinlock, and it can be modified while copy_to_user() is
> being processed.

Should disabling pagefaults while interacting with user space work here?
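
I was thinking of something roughly like this (only a sketch; the
atomic copy fails instead of sleeping when the page is not resident,
so a fallback outside the lock would still be needed):

	/* Keep the lock held, but forbid page faults so that the copy
	 * cannot sleep (pagefault_disable()/__copy_to_user_inatomic()
	 * from <linux/uaccess.h>).
	 */
	unsigned long uncopied;

	pagefault_disable();
	uncopied = __copy_to_user_inatomic(buf, efw->pull_ptr, till_end);
	pagefault_enable();

	if (uncopied) {
		/* The destination page was not resident; a sleeping
		 * copy_to_user() outside the spinlock would be needed
		 * as a fallback here.
		 */
	}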

> 
> Takashi
> 
>>  			if (efw->pull_ptr >= efw->resp_buf +
>>  					     snd_efw_resp_buf_size)
>> @@ -69,6 +74,8 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
>>  		efw->resp_queues--;
>>  	}
>>  
>> +	spin_unlock_irq(&efw->lock);
>> +
>>  	return count;
>>  }
>>  
>> @@ -76,14 +83,17 @@ static long
>>  hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
>>  		  loff_t *offset)
>>  {
>> -	union snd_firewire_event event;
>> +	union snd_firewire_event event = {
>> +		.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
>> +	};
>>  
>> -	memset(&event, 0, sizeof(event));
>> +	spin_lock_irq(&efw->lock);
>>  
>> -	event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
>>  	event.lock_status.status = (efw->dev_lock_count > 0);
>>  	efw->dev_lock_changed = false;
>>  
>> +	spin_unlock_irq(&efw->lock);
>> +
>>  	count = min_t(long, count, sizeof(event.lock_status));
>>  
>>  	if (copy_to_user(buf, &event, count))
>> @@ -111,13 +121,14 @@ hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
>>  		spin_lock_irq(&efw->lock);
>>  	}
>>  
>> +	/* Loosely release the lock here, but still safe. */
>> +	spin_unlock_irq(&efw->lock);
>> +
>>  	if (efw->dev_lock_changed)
>>  		count = hwdep_read_locked(efw, buf, count, offset);
>>  	else if (efw->resp_queues > 0)
>>  		count = hwdep_read_resp_buf(efw, buf, count, offset);
>>  
>> -	spin_unlock_irq(&efw->lock);
>> -
>>  	return count;
>>  }
>>  
>> -- 
>> 2.7.4
>>
Takashi Sakamoto Aug. 30, 2016, 6:27 a.m. UTC | #3
On Aug 30 2016 15:07, Vaishali Thakkar wrote:
>>> diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
>>> index 33df865..8ed4b29 100644
>>> --- a/sound/firewire/fireworks/fireworks_hwdep.c
>>> +++ b/sound/firewire/fireworks/fireworks_hwdep.c
>>> @@ -38,6 +38,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
>>>  	buf += sizeof(type);
>>>
>>>  	/* write into buffer as many responses as possible */
>>> +	spin_lock_irq(&efw->lock);
>>>  	while (efw->resp_queues > 0) {
>>>  		t = (struct snd_efw_transaction *)(efw->pull_ptr);
>>>  		length = be32_to_cpu(t->length) * sizeof(__be32);
>>> @@ -52,9 +53,13 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
>>>  				(unsigned int)(efw->pull_ptr - efw->resp_buf);
>>>  			till_end = min_t(unsigned int, length, till_end);
>>>
>>> +			spin_unlock_irq(&efw->lock);
>>> +
>>>  			if (copy_to_user(buf, efw->pull_ptr, till_end))
>>>  				return -EFAULT;
>>>
>>> +			spin_lock_irq(&efw->lock);
>>> +
>>>  			efw->pull_ptr += till_end;
>>
>> Strictly speaking, this is still racy.  efw->pull_ptr is accessed
>> outside the spinlock, and it can be modified while copy_to_user() is
>> being processed.
>
> Should disabling pagefaults while interacting with user space work here?

Instead, assign efw->pull_ptr to a local variable while the spin lock
is held, then pass that variable to copy_to_user(). The pointer is
within the ring buffer and only moves when replies are received from
the actual devices, so it remains valid as long as the application
sends transactions in order.
(Only one application is allowed to use this interface at a time.)

Not the best, but better and less complex, I think.
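
Something like this (just a sketch of the idea, not a tested patch):

	/* Dereference a snapshot taken under the lock, instead of
	 * efw->pull_ptr itself, while copying to user space.
	 */
	void *pull_ptr;

	spin_lock_irq(&efw->lock);
	pull_ptr = efw->pull_ptr;
	spin_unlock_irq(&efw->lock);

	if (copy_to_user(buf, pull_ptr, till_end))
		return -EFAULT;

	/* Advance the shared pull pointer under the lock again. */
	spin_lock_irq(&efw->lock);
	efw->pull_ptr += till_end;
	spin_unlock_irq(&efw->lock);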


Regards

Takashi Sakamoto
Takashi Iwai Aug. 30, 2016, 6:54 a.m. UTC | #4
On Tue, 30 Aug 2016 08:27:34 +0200,
Takashi Sakamoto wrote:
> 
> On Aug 30 2016 15:07, Vaishali Thakkar wrote:
> >>> diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
> >>> index 33df865..8ed4b29 100644
> >>> --- a/sound/firewire/fireworks/fireworks_hwdep.c
> >>> +++ b/sound/firewire/fireworks/fireworks_hwdep.c
> >>> @@ -38,6 +38,7 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
> >>>  	buf += sizeof(type);
> >>>
> >>>  	/* write into buffer as many responses as possible */
> >>> +	spin_lock_irq(&efw->lock);
> >>>  	while (efw->resp_queues > 0) {
> >>>  		t = (struct snd_efw_transaction *)(efw->pull_ptr);
> >>>  		length = be32_to_cpu(t->length) * sizeof(__be32);
> >>> @@ -52,9 +53,13 @@ hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
> >>>  				(unsigned int)(efw->pull_ptr - efw->resp_buf);
> >>>  			till_end = min_t(unsigned int, length, till_end);
> >>>
> >>> +			spin_unlock_irq(&efw->lock);
> >>> +
> >>>  			if (copy_to_user(buf, efw->pull_ptr, till_end))
> >>>  				return -EFAULT;
> >>>
> >>> +			spin_lock_irq(&efw->lock);
> >>> +
> >>>  			efw->pull_ptr += till_end;
> >>
> >> Strictly speaking, this is still racy.  efw->pull_ptr is accessed
> >> outside the spinlock, and it can be modified while copy_to_user() is
> >> being processed.
> >
> > Should disabling pagefaults while interacting with user space work here?
> 
> Instead, assign efw->pull_ptr to a local variable while the spin lock
> is held, then pass that variable to copy_to_user(). The pointer is
> within the ring buffer and only moves when replies are received from
> the actual devices, so it remains valid as long as the application
> sends transactions in order.
> (Only one application is allowed to use this interface at a time.)
> 
> Not the best, but better and less complex, I think.

Yes, that's one of the workarounds, and maybe the most feasible choice
in this case.


Takashi

Patch

diff --git a/sound/firewire/fireworks/fireworks_hwdep.c b/sound/firewire/fireworks/fireworks_hwdep.c
index 33df865..8ed4b29 100644
--- a/sound/firewire/fireworks/fireworks_hwdep.c
+++ b/sound/firewire/fireworks/fireworks_hwdep.c
@@ -38,6 +38,7 @@  hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
 	buf += sizeof(type);
 
 	/* write into buffer as many responses as possible */
+	spin_lock_irq(&efw->lock);
 	while (efw->resp_queues > 0) {
 		t = (struct snd_efw_transaction *)(efw->pull_ptr);
 		length = be32_to_cpu(t->length) * sizeof(__be32);
@@ -52,9 +53,13 @@  hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
 				(unsigned int)(efw->pull_ptr - efw->resp_buf);
 			till_end = min_t(unsigned int, length, till_end);
 
+			spin_unlock_irq(&efw->lock);
+
 			if (copy_to_user(buf, efw->pull_ptr, till_end))
 				return -EFAULT;
 
+			spin_lock_irq(&efw->lock);
+
 			efw->pull_ptr += till_end;
 			if (efw->pull_ptr >= efw->resp_buf +
 					     snd_efw_resp_buf_size)
@@ -69,6 +74,8 @@  hwdep_read_resp_buf(struct snd_efw *efw, char __user *buf, long remained,
 		efw->resp_queues--;
 	}
 
+	spin_unlock_irq(&efw->lock);
+
 	return count;
 }
 
@@ -76,14 +83,17 @@  static long
 hwdep_read_locked(struct snd_efw *efw, char __user *buf, long count,
 		  loff_t *offset)
 {
-	union snd_firewire_event event;
+	union snd_firewire_event event = {
+		.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS,
+	};
 
-	memset(&event, 0, sizeof(event));
+	spin_lock_irq(&efw->lock);
 
-	event.lock_status.type = SNDRV_FIREWIRE_EVENT_LOCK_STATUS;
 	event.lock_status.status = (efw->dev_lock_count > 0);
 	efw->dev_lock_changed = false;
 
+	spin_unlock_irq(&efw->lock);
+
 	count = min_t(long, count, sizeof(event.lock_status));
 
 	if (copy_to_user(buf, &event, count))
@@ -111,13 +121,14 @@  hwdep_read(struct snd_hwdep *hwdep, char __user *buf, long count,
 		spin_lock_irq(&efw->lock);
 	}
 
+	/* Loosely release the lock here, but still safe. */
+	spin_unlock_irq(&efw->lock);
+
 	if (efw->dev_lock_changed)
 		count = hwdep_read_locked(efw, buf, count, offset);
 	else if (efw->resp_queues > 0)
 		count = hwdep_read_resp_buf(efw, buf, count, offset);
 
-	spin_unlock_irq(&efw->lock);
-
 	return count;
 }