
RDS: sync congestion map updating

Message ID 1459328902-31968-1-git-send-email-wen.gang.wang@oracle.com (mailing list archive)
State Superseded

Commit Message

Wengang Wang March 30, 2016, 9:08 a.m. UTC
A problem was found where some of many parallel RDS communications hang.
In my test, ten or so out of 33 communications hung. The send requests got
an -ENOBUFS error, meaning the peer socket (port) is congested, while in
fact the peer socket (port) is not congested.

The congestion map can be updated from two paths: one is the rds_recvmsg
path, and the other is the path that receives packets from the hardware.
There is no synchronization between them when updating the congestion map,
so a bit operation (clearing) in the rds_recvmsg path can be lost to another
bit operation (setting) in the hardware packet receiving path.

The fix is to add a spin lock per congestion map to synchronize updates to
it. No performance drop was found when testing the fix.

Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
---
 net/rds/cong.c | 7 +++++++
 net/rds/rds.h  | 1 +
 2 files changed, 8 insertions(+)
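
For readers who want to see the race in isolation, here is a minimal
userspace sketch (an illustration only, not RDS code; the helper names,
trial count and start barrier are made up) of how unlocked read-modify-write
bit operations, analogous to __set_bit_le()/__clear_bit_le() running on two
CPUs, can lose updates to the same word:

/* Two threads each set their own half of one word with a plain, non-atomic
 * |= (like __set_bit_le()).  Because the read-modify-write is not atomic,
 * one thread's stale store can wipe out bits the other thread already set. */
#include <limits.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define WORD_BITS	(sizeof(unsigned long) * CHAR_BIT)

static volatile unsigned long word;	/* one word of the "bitmap" */
static atomic_int ready;		/* crude start barrier */

static void *set_half(void *arg)
{
	unsigned int base = *(unsigned int *)arg;

	atomic_fetch_add(&ready, 1);
	while (atomic_load(&ready) < 2)
		;			/* spin until both threads are running */
	for (unsigned int bit = base; bit < base + WORD_BITS / 2; bit++)
		word |= 1UL << bit;	/* non-atomic RMW, like __set_bit_le() */
	return NULL;
}

int main(void)
{
	unsigned int lo = 0, hi = WORD_BITS / 2, lost = 0;

	for (int trial = 0; trial < 20000; trial++) {
		pthread_t a, b;

		word = 0;
		atomic_store(&ready, 0);
		pthread_create(&a, NULL, set_half, &lo);
		pthread_create(&b, NULL, set_half, &hi);
		pthread_join(a, NULL);
		pthread_join(b, NULL);
		if (word != ~0UL)	/* at least one set bit was lost */
			lost++;
	}
	printf("trials with lost bit updates: %u of 20000\n", lost);
	return 0;
}

Built with cc -pthread, a run on a multi-core machine will usually report a
nonzero number of trials in which a bit set by one thread was wiped out by
the other thread's stale store, which is the same effect that leaves a port
spuriously marked, or unmarked, as congested in the map.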

Comments

Leon Romanovsky March 30, 2016, 4:19 p.m. UTC | #1
On Wed, Mar 30, 2016 at 05:08:22PM +0800, Wengang Wang wrote:
> Problem is found that some among a lot of parallel RDS communications hang.
> In my test ten or so among 33 communications hang. The send requests got
> -ENOBUF error meaning the peer socket (port) is congested. But meanwhile,
> peer socket (port) is not congested.
> 
> The congestion map updating can happen in two paths: one is in rds_recvmsg path
> and the other is when it receives packets from the hardware. There is no
> synchronization when updating the congestion map. So a bit operation (clearing)
> in the rds_recvmsg path can be skipped by another bit operation (setting) in
> hardware packet receving path.
> 
> Fix is to add a spin lock per congestion map to sync the update on it.
> No performance drop found during the test for the fix.

I assume that this change fixed your issue; however, it looks suspicious
that the performance didn't change.

> 
> Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
> ---
>  net/rds/cong.c | 7 +++++++
>  net/rds/rds.h  | 1 +
>  2 files changed, 8 insertions(+)

According to the get_maintainer script, you sent this patch to the wrong
lists and people.

➜  linux git:(master) ./scripts/get_maintainer.pl -f net/rds/cong.c
Santosh Shilimkar <santosh.shilimkar@oracle.com> (supporter:RDS - RELIABLE DATAGRAM SOCKETS)
"David S. Miller" <davem@davemloft.net> (maintainer:NETWORKING [GENERAL])
netdev@vger.kernel.org (open list:RDS - RELIABLE DATAGRAM SOCKETS)
linux-rdma@vger.kernel.org (open list:RDS - RELIABLE DATAGRAM SOCKETS)
rds-devel@oss.oracle.com (moderated list:RDS - RELIABLE DATAGRAM SOCKETS)
linux-kernel@vger.kernel.org (open list)

Santosh Shilimkar March 30, 2016, 5:16 p.m. UTC | #2
Hi Wengang,

On 3/30/2016 9:19 AM, Leon Romanovsky wrote:
> On Wed, Mar 30, 2016 at 05:08:22PM +0800, Wengang Wang wrote:
>> Problem is found that some among a lot of parallel RDS communications hang.
>> In my test ten or so among 33 communications hang. The send requests got
>> -ENOBUF error meaning the peer socket (port) is congested. But meanwhile,
>> peer socket (port) is not congested.
>>
>> The congestion map updating can happen in two paths: one is in rds_recvmsg path
>> and the other is when it receives packets from the hardware. There is no
>> synchronization when updating the congestion map. So a bit operation (clearing)
>> in the rds_recvmsg path can be skipped by another bit operation (setting) in
>> hardware packet receving path.
>>
>> Fix is to add a spin lock per congestion map to sync the update on it.
>> No performance drop found during the test for the fix.
>
> I assume that this change fixed your issue, however it looks suspicious
> that performance wasn't change.
>
First of all, thanks for finding the issue and posting a patch
for it. I agree with Leon's comment on performance.
We shouldn't need locks for map updates.

Moreover, the parallel receive path on which this patch
is based doesn't exist in the upstream code. I have kept
that out so far because of issues similar to the one you
encountered.

Anyway, let's discuss the fix offline, even for the
downstream kernel. I suspect we can address it without locks.

Regards,
Santosh
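
One lock-free direction, sketched here on the assumption that the kernel's
atomic little-endian bit helpers are acceptable in these code paths, would
be to switch the non-atomic helpers to their atomic counterparts instead of
adding a per-map spinlock:

in rds_cong_set_bit():
-	__set_bit_le(off, (void *)map->m_page_addrs[i]);
+	set_bit_le(off, (void *)map->m_page_addrs[i]);

in rds_cong_clear_bit():
-	__clear_bit_le(off, (void *)map->m_page_addrs[i]);
+	clear_bit_le(off, (void *)map->m_page_addrs[i]);

set_bit_le() and clear_bit_le() are thin wrappers around the atomic
set_bit()/clear_bit(), so concurrent updates to the same word of the bitmap
cannot lose each other and struct rds_cong_map would need no new lock field;
whether an atomic RMW per update costs too much is the same performance
question raised above.
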
Wengang Wang March 31, 2016, 1:24 a.m. UTC | #3
Hi Leon,

On 2016-03-31 00:19, Leon Romanovsky wrote:
> On Wed, Mar 30, 2016 at 05:08:22PM +0800, Wengang Wang wrote:
>> Problem is found that some among a lot of parallel RDS communications hang.
>> In my test ten or so among 33 communications hang. The send requests got
>> -ENOBUF error meaning the peer socket (port) is congested. But meanwhile,
>> peer socket (port) is not congested.
>>
>> The congestion map updating can happen in two paths: one is in rds_recvmsg path
>> and the other is when it receives packets from the hardware. There is no
>> synchronization when updating the congestion map. So a bit operation (clearing)
>> in the rds_recvmsg path can be skipped by another bit operation (setting) in
>> hardware packet receving path.
>>
>> Fix is to add a spin lock per congestion map to sync the update on it.
>> No performance drop found during the test for the fix.
> I assume that this change fixed your issue, however it looks suspicious
> that performance wasn't change.
Sure, I verified that the patch fixes the issue.
For performance, I will reply to Santosh's email later; please check there.
>> Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>
>> ---
>>   net/rds/cong.c | 7 +++++++
>>   net/rds/rds.h  | 1 +
>>   2 files changed, 8 insertions(+)
> According to get_maintainer script, you send this patch to wrong lists
> and persons.
>
> ➜  linux git:(master) ./scripts/get_maintainer.pl -f net/rds/cong.c
> Santosh Shilimkar <santosh.shilimkar@oracle.com> (supporter:RDS - RELIABLE DATAGRAM SOCKETS)
> "David S. Miller" <davem@davemloft.net> (maintainer:NETWORKING [GENERAL])
> netdev@vger.kernel.org (open list:RDS - RELIABLE DATAGRAM SOCKETS)
> linux-rdma@vger.kernel.org (open list:RDS - RELIABLE DATAGRAM SOCKETS)

So linux-rdma is here :)

thanks,
wengang

Wengang Wang March 31, 2016, 1:51 a.m. UTC | #4
On 2016-03-31 01:16, santosh shilimkar wrote:
> Hi Wengang,
>
> On 3/30/2016 9:19 AM, Leon Romanovsky wrote:
>> On Wed, Mar 30, 2016 at 05:08:22PM +0800, Wengang Wang wrote:
>>> Problem is found that some among a lot of parallel RDS 
>>> communications hang.
>>> In my test ten or so among 33 communications hang. The send requests 
>>> got
>>> -ENOBUF error meaning the peer socket (port) is congested. But 
>>> meanwhile,
>>> peer socket (port) is not congested.
>>>
>>> The congestion map updating can happen in two paths: one is in 
>>> rds_recvmsg path
>>> and the other is when it receives packets from the hardware. There 
>>> is no
>>> synchronization when updating the congestion map. So a bit operation 
>>> (clearing)
>>> in the rds_recvmsg path can be skipped by another bit operation 
>>> (setting) in
>>> hardware packet receving path.
>>>
>>> Fix is to add a spin lock per congestion map to sync the update on it.
>>> No performance drop found during the test for the fix.
>>
>> I assume that this change fixed your issue, however it looks suspicious
>> that performance wasn't change.
>>
> First of all thanks for finding the issue and posting patch
> for it. I do agree with Leon on performance comment.
> We shouldn't need locks for map updates.
>
Here is the performance data I collected yesterday.
Settings:
net.core.rmem_default = 4194304
net.core.wmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_max = 2097152

test case:  rds-stress -s 192.168.111.16 -q 1m -d 10 -T 300 -t 10
With 1MB sends, 10 pending send requests are enough to trigger congestion
on the receiver side, and the test lasts 5 minutes.

The result is like this:
without patch:
sender
10   2231   2355 4697759.63       0.00       0.00  473.38 19123.89 -1.00  (average)
receiver
10   2356   2231 4698350.06       0.00       0.00  486.28 18537.23 -1.00  (average)

with patch applied:
sender
10   2230   2396 47x.53       0.00       0.00  475.87 31954.35 -1.00  (average)
receiver
10   2396   2230 4738051.76       0.00       0.00  480.85 18408.13 -1.00  (average)

So I don't see a performance drop. In a previous test the result was the
other way around, that is, it was faster without the patch applied, but the
numbers were still 47xxxxx vs 46xxxxx. So I don't have a very stable test
result, but on average there is no obvious performance drop.

Let me try to explain it in theory:
Firstly, in both the rds_recvmsg path and the hardware data-receiving path,
we take rds_sock->rs_recv_lock (which is not enough to fix our issue here,
since there can be many different rds_socks) very shortly before we take
the congestion map lock, so the extra cost of refilling the CPU cache is
small.
Secondly, although the problem exists, the malformed map may not occur that
frequently, especially for this test case with 10 parallel communications.

> Moreover the parallel receive path on which this patch
> is based of doesn't exist in upstream code. I have kept
> that out so far because of similar issue like one you
> encountered.
But I don't see how the rds_recvmsg path differs from the UEK kernels. Can
you explain more, here or offline?

>
> Anyways lets discuss offline about the fix even for the
> downstream kernel. I suspect we can address it without locks.
>
If there is no performance issue in normal use (and until we find an
important use case that would be hit), I think locking is fine.
Well, what ideas do you have for avoiding locks? After all, we are updating
an 8KB bitmap, not a single uint64 or smaller variable. Whether we use a
lock or not, we need to make sure the bits being updated can't be stale in
the caches of different CPUs.

thanks,
wengang

> Regards,
> Santosh
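
For reference, a lockless update only needs to serialize the one word that
holds the bit, not the whole 8KB map. A rough userspace sketch with compiler
builtins (the helper names are hypothetical, standing in for the kernel's
atomic bitops):

/* An atomic set/clear of one congestion bit is a locked read-modify-write
 * on the single unsigned long containing that bit; the rest of the ~8KB
 * map is untouched, so no map-wide cache traffic is required. */
static inline void cong_set_bit_atomic(unsigned long *page, unsigned int off)
{
	unsigned long *word = page + off / (sizeof(*page) * 8);
	unsigned long mask = 1UL << (off % (sizeof(*page) * 8));

	__atomic_fetch_or(word, mask, __ATOMIC_RELAXED);
}

static inline void cong_clear_bit_atomic(unsigned long *page, unsigned int off)
{
	unsigned long *word = page + off / (sizeof(*page) * 8);
	unsigned long mask = 1UL << (off % (sizeof(*page) * 8));

	__atomic_fetch_and(word, ~mask, __ATOMIC_RELAXED);
}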

Wengang Wang March 31, 2016, 2:59 a.m. UTC | #5
On 2016-03-31 09:51, Wengang Wang wrote:
>
>
> On 2016-03-31 01:16, santosh shilimkar wrote:
>> Hi Wengang,
>>
>> On 3/30/2016 9:19 AM, Leon Romanovsky wrote:
>>> On Wed, Mar 30, 2016 at 05:08:22PM +0800, Wengang Wang wrote:
>>>> Problem is found that some among a lot of parallel RDS 
>>>> communications hang.
>>>> In my test ten or so among 33 communications hang. The send 
>>>> requests got
>>>> -ENOBUF error meaning the peer socket (port) is congested. But 
>>>> meanwhile,
>>>> peer socket (port) is not congested.
>>>>
>>>> The congestion map updating can happen in two paths: one is in 
>>>> rds_recvmsg path
>>>> and the other is when it receives packets from the hardware. There 
>>>> is no
>>>> synchronization when updating the congestion map. So a bit 
>>>> operation (clearing)
>>>> in the rds_recvmsg path can be skipped by another bit operation 
>>>> (setting) in
>>>> hardware packet receving path.
>>>>

To be more detailed: here, the two paths (the user calling recvmsg and the
hardware receiving data) belong to different rds socks, so the
rds_sock->rs_recv_lock does not help to synchronize the updates to the
congestion map.

thanks,
wengang


Patch

diff --git a/net/rds/cong.c b/net/rds/cong.c
index e6144b8..7afc1bf 100644
--- a/net/rds/cong.c
+++ b/net/rds/cong.c
@@ -144,6 +144,7 @@  static struct rds_cong_map *rds_cong_from_addr(__be32 addr)
 	if (!map)
 		return NULL;
 
+	spin_lock_init(&map->m_lock);
 	map->m_addr = addr;
 	init_waitqueue_head(&map->m_waitq);
 	INIT_LIST_HEAD(&map->m_conn_list);
@@ -292,6 +293,7 @@  void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
 {
 	unsigned long i;
 	unsigned long off;
+	unsigned long flags;
 
 	rdsdebug("setting congestion for %pI4:%u in map %p\n",
 	  &map->m_addr, ntohs(port), map);
@@ -299,13 +301,16 @@  void rds_cong_set_bit(struct rds_cong_map *map, __be16 port)
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
+	spin_lock_irqsave(&map->m_lock, flags);
 	__set_bit_le(off, (void *)map->m_page_addrs[i]);
+	spin_unlock_irqrestore(&map->m_lock, flags);
 }
 
 void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
 {
 	unsigned long i;
 	unsigned long off;
+	unsigned long flags;
 
 	rdsdebug("clearing congestion for %pI4:%u in map %p\n",
 	  &map->m_addr, ntohs(port), map);
@@ -313,7 +318,9 @@  void rds_cong_clear_bit(struct rds_cong_map *map, __be16 port)
 	i = be16_to_cpu(port) / RDS_CONG_MAP_PAGE_BITS;
 	off = be16_to_cpu(port) % RDS_CONG_MAP_PAGE_BITS;
 
+	spin_lock_irqsave(&map->m_lock, flags);
 	__clear_bit_le(off, (void *)map->m_page_addrs[i]);
+	spin_unlock_irqrestore(&map->m_lock, flags);
 }
 
 static int rds_cong_test_bit(struct rds_cong_map *map, __be16 port)
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 80256b0..f359cf8 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -59,6 +59,7 @@  struct rds_cong_map {
 	__be32			m_addr;
 	wait_queue_head_t	m_waitq;
 	struct list_head	m_conn_list;
+	spinlock_t		m_lock;
 	unsigned long		m_page_addrs[RDS_CONG_MAP_PAGES];
 };