diff mbox

lightnvm: change rrpc slab creation/destruction time

Message ID 1449490577-30574-1-git-send-email-ww.tao0320@gmail.com (mailing list archive)
State New, archived
Headers show

Commit Message

Wenwei Tao Dec. 7, 2015, 12:16 p.m. UTC
Create rrpc slabs during rrpc module init,
thus eliminating the lock contention and slab
status check in rrpc_core_init, and destroy
them when the rrpc module exits.

Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
---
 drivers/lightnvm/rrpc.c | 54 ++++++++++++++++++++++++++++++-------------------
 1 file changed, 33 insertions(+), 21 deletions(-)

Comments

Wenwei Tao Dec. 8, 2015, 11:36 a.m. UTC | #1
Hi Matias
In my understanding, kmem_cache_create only allocates and sets up some
basic structures; the actual slab memory is allocated from the buddy
system when we use these slabs.
Do you think the memory consumed by these structures is an issue
compared to the lock contention and slab status check in
rrpc_core_init? Or do you have any other concerns?

2015-12-08 3:45 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
> On Mon, Dec 7, 2015 at 1:16 PM, Wenwei Tao <ww.tao0320@gmail.com> wrote:
>>
>> create rrpc slabs during rrpc module init,
>> thus eliminate the lock contention and slab
>> status check in rrpc_core_init. And destroy
>> them when rrpc exit.
>>
>> Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
>> ---
>>  drivers/lightnvm/rrpc.c | 54
>> ++++++++++++++++++++++++++++++-------------------
>>  1 file changed, 33 insertions(+), 21 deletions(-)
>>
>> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
>> index 75e59c3..a36f39a 100644
>> --- a/drivers/lightnvm/rrpc.c
>> +++ b/drivers/lightnvm/rrpc.c
>> @@ -17,7 +17,6 @@
>>  #include "rrpc.h"
>>
>>  static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
>> -static DECLARE_RWSEM(rrpc_lock);
>>
>>  static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
>>                                 struct nvm_rq *rqd, unsigned long flags);
>> @@ -1019,26 +1018,6 @@ static int rrpc_map_init(struct rrpc *rrpc)
>>
>>  static int rrpc_core_init(struct rrpc *rrpc)
>>  {
>> -       down_write(&rrpc_lock);
>> -       if (!rrpc_gcb_cache) {
>> -               rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
>> -                               sizeof(struct rrpc_block_gc), 0, 0, NULL);
>> -               if (!rrpc_gcb_cache) {
>> -                       up_write(&rrpc_lock);
>> -                       return -ENOMEM;
>> -               }
>> -
>> -               rrpc_rq_cache = kmem_cache_create("rrpc_rq",
>> -                               sizeof(struct nvm_rq) + sizeof(struct
>> rrpc_rq),
>> -                               0, 0, NULL);
>> -               if (!rrpc_rq_cache) {
>> -                       kmem_cache_destroy(rrpc_gcb_cache);
>> -                       up_write(&rrpc_lock);
>> -                       return -ENOMEM;
>> -               }
>> -       }
>> -       up_write(&rrpc_lock);
>> -
>>         rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
>>         if (!rrpc->page_pool)
>>                 return -ENOMEM;
>> @@ -1338,14 +1317,47 @@ static struct nvm_tgt_type tt_rrpc = {
>>         .exit           = rrpc_exit,
>>  };
>>
>> +static int __init rrpc_slab_init(void)
>> +{
>> +       rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
>> +                       sizeof(struct rrpc_block_gc), 0, 0, NULL);
>> +       if (!rrpc_gcb_cache)
>> +               goto out;
>> +
>> +       rrpc_rq_cache = kmem_cache_create("rrpc_rq",
>> +                       sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
>> +                       0, 0, NULL);
>> +       if (!rrpc_rq_cache)
>> +               goto out_free;
>> +
>> +       return 0;
>> +
>> +out_free:
>> +       kmem_cache_destroy(rrpc_gcb_cache);
>> +out:
>> +       return -ENOMEM;
>> +}
>> +
>> +static inline void rrpc_slab_free(void)
>> +{
>> +       kmem_cache_destroy(rrpc_gcb_cache);
>> +       kmem_cache_destroy(rrpc_rq_cache);
>> +}
>> +
>>  static int __init rrpc_module_init(void)
>>  {
>> +       int ret;
>> +
>> +       ret = rrpc_slab_init();
>> +       if (ret)
>> +               return ret;
>>         return nvm_register_target(&tt_rrpc);
>>  }
>>
>>  static void rrpc_module_exit(void)
>>  {
>>         nvm_unregister_target(&tt_rrpc);
>> +       rrpc_slab_free();
>>  }
>>
>>  module_init(rrpc_module_init);
>
>
> Thanks Tao. I think the previous behavior is better. That way we don't
> consume any memory until the module is in use.
--
To unsubscribe from this list: send the line "unsubscribe linux-block" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Matias Bjørling Dec. 8, 2015, 1:26 p.m. UTC | #2
On 12/08/2015 12:36 PM, Wenwei Tao wrote:
> Hi Matias
> In my understanding  kmem_cache_create only allocate and setup some
> basic structures, the actual slab memory is allocated  from buddy
> system when we use these slabs.
> Do you think the memory consumed by these structures is a issue
> compared to the lock contention and slab status check in
> rrpc_core_init?or you have any other concerns ?

The rrpc_core_init is only run at registration time and isn't in a 
critical path. So I'd rather not initialize something until we really need it.

PS: it is customary not to top-post :)

--
To unsubscribe from this list: send the line "unsubscribe linux-block" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
diff mbox

Patch

diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 75e59c3..a36f39a 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -17,7 +17,6 @@ 
 #include "rrpc.h"
 
 static struct kmem_cache *rrpc_gcb_cache, *rrpc_rq_cache;
-static DECLARE_RWSEM(rrpc_lock);
 
 static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
 				struct nvm_rq *rqd, unsigned long flags);
@@ -1019,26 +1018,6 @@  static int rrpc_map_init(struct rrpc *rrpc)
 
 static int rrpc_core_init(struct rrpc *rrpc)
 {
-	down_write(&rrpc_lock);
-	if (!rrpc_gcb_cache) {
-		rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
-				sizeof(struct rrpc_block_gc), 0, 0, NULL);
-		if (!rrpc_gcb_cache) {
-			up_write(&rrpc_lock);
-			return -ENOMEM;
-		}
-
-		rrpc_rq_cache = kmem_cache_create("rrpc_rq",
-				sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
-				0, 0, NULL);
-		if (!rrpc_rq_cache) {
-			kmem_cache_destroy(rrpc_gcb_cache);
-			up_write(&rrpc_lock);
-			return -ENOMEM;
-		}
-	}
-	up_write(&rrpc_lock);
-
 	rrpc->page_pool = mempool_create_page_pool(PAGE_POOL_SIZE, 0);
 	if (!rrpc->page_pool)
 		return -ENOMEM;
@@ -1338,14 +1317,47 @@  static struct nvm_tgt_type tt_rrpc = {
 	.exit		= rrpc_exit,
 };
 
+static int __init rrpc_slab_init(void)
+{
+	rrpc_gcb_cache = kmem_cache_create("rrpc_gcb",
+			sizeof(struct rrpc_block_gc), 0, 0, NULL);
+	if (!rrpc_gcb_cache)
+		goto out;
+
+	rrpc_rq_cache = kmem_cache_create("rrpc_rq",
+			sizeof(struct nvm_rq) + sizeof(struct rrpc_rq),
+			0, 0, NULL);
+	if (!rrpc_rq_cache)
+		goto out_free;
+
+	return 0;
+
+out_free:
+	kmem_cache_destroy(rrpc_gcb_cache);
+out:
+	return -ENOMEM;
+}
+
+static inline void rrpc_slab_free(void)
+{
+	kmem_cache_destroy(rrpc_gcb_cache);
+	kmem_cache_destroy(rrpc_rq_cache);
+}
+
 static int __init rrpc_module_init(void)
 {
+	int ret;
+
+	ret = rrpc_slab_init();
+	if (ret)
+		return ret;
 	return nvm_register_target(&tt_rrpc);
 }
 
 static void rrpc_module_exit(void)
 {
 	nvm_unregister_target(&tt_rrpc);
+	rrpc_slab_free();
 }
 
 module_init(rrpc_module_init);