[v2,1/2] lightnvm: specify target's logical address area

Message ID 56A88FB9.4020106@lightnvm.io (mailing list archive)
State New, archived

Commit Message

Matias Bjorling Jan. 27, 2016, 9:36 a.m. UTC
On 01/27/2016 07:06 AM, Wenwei Tao wrote:
> Thanks.
> 
> 2016-01-27 13:52 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
>> On 01/27/2016 03:21 AM, Wenwei Tao wrote:
>>>
>>> Yes, it's a spelling mistake; I will correct it in the next version.
>>
>>
>> I can fix it in the version I apply. No problem.

Hi Wenwei,

I've changed it to this and cleaned up the variables a bit. Is that OK with you?

Thanks

[...]

 	unsigned int version[3];
@@ -487,6 +491,10 @@ struct nvmm_type {

 	/* Statistics */
 	nvmm_lun_info_print_fn *lun_info_print;
+
+	nvmm_get_area_fn *get_area;
+	nvmm_put_area_fn *put_area;
+
 	struct list_head list;
 };





Comments

Wenwei Tao Jan. 27, 2016, 12:47 p.m. UTC | #1
2016-01-27 17:36 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
> On 01/27/2016 07:06 AM, Wenwei Tao wrote:
>> Thanks.
>>
>> 2016-01-27 13:52 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
>>> On 01/27/2016 03:21 AM, Wenwei Tao wrote:
>>>>
>>>> Yes, it's a spelling mistake; I will correct it in the next version.
>>>
>>>
>>> I can fix it in the version I apply. No problem.
>
> Hi Wenwei,
>
> I've changed it to this and cleaned up the variables a bit. Is that OK with you?
>
> Thanks
>
> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> index 33224cb..27a59e8 100644
> --- a/drivers/lightnvm/core.c
> +++ b/drivers/lightnvm/core.c
> @@ -470,6 +470,7 @@ static int nvm_core_init(struct nvm_dev *dev)
>         dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
>         INIT_LIST_HEAD(&dev->online_targets);
>         mutex_init(&dev->mlock);
> +       spin_lock_init(&dev->lock);
>
>         return 0;
>  }
> diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
> index 7fb725b..6e2685d 100644
> --- a/drivers/lightnvm/gennvm.c
> +++ b/drivers/lightnvm/gennvm.c
> @@ -20,6 +20,63 @@
>
>  #include "gennvm.h"
>
> +static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
> +                                                       sector_t size)
> +{
> +       struct gen_nvm *gn = dev->mp;
> +       struct gennvm_area *area, *prev;
> +       int page_size = dev->sec_size * dev->sec_per_pg;
> +       sector_t begin = 0;
> +       sector_t max_sectors = (page_size * dev->total_pages) >> 9;
> +
> +       if (size > max_sectors)
> +               return -EINVAL;
> +
> +       area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
> +       if (!area)
> +               return -ENOMEM;
> +
> +       spin_lock(&dev->lock);
> +       list_for_each_entry(prev, &gn->area_list, list) {
> +               if (begin + size > prev->begin) {
> +                       begin = prev->end;
> +                       continue;
> +               }
> +               break;
> +       }
> +
> +       if ((begin + size) > max_sectors) {
> +               spin_unlock(&dev->lock);
> +               kfree(area);
> +               return -EINVAL;
> +       }
> +
> +       area->begin = *begin_sect = begin;
> +       area->end = begin + size;
> +       list_add(&area->list, &prev->list);

I think I have made a mistake here. Inserting the new area after prev
will not keep the list sorted in increasing order of area->begin. And
prev is not trustworthy once we are out of the loop; it may point to
list_entry((head)->next, typeof(*pos), member). Below is the changed
code:

+static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
+                                                       sector_t size)
+{
+       struct gen_nvm *gn = dev->mp;
+       struct gennvm_area *area, *prev, *next;
+       sector_t begin = 0;
+       int page_size = dev->sec_size * dev->sec_per_pg;
+       sector_t max_sectors = (page_size * dev->total_pages) >> 9;
+
+       if (size > max_sectors)
+               return -EINVAL;
+       area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+       if (!area)
+               return -ENOMEM;
+
+       prev = NULL;
+
+       spin_lock(&dev->lock);
+       list_for_each_entry(next, &gn->area_list, list) {
+               if (begin + size > next->begin) {
+                       begin = next->end;
+                       prev = next;
+                       continue;
+               }
+               break;
+       }
+
+       if ((begin + size) > max_sectors) {
+               spin_unlock(&dev->lock);
+               kfree(area);
+               return -EINVAL;
+       }
+
+       area->begin = *begin_sect = begin;
+       area->end = begin + size;
+       if (prev)
+               list_add(&area->list, &prev->list);
+       else
+               list_add(&area->list, &gn->area_list);
+       spin_unlock(&dev->lock);
+       return 0;
+}
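
To illustrate the corrected first-fit walk, here is a worked trace with
made-up numbers (assume max_sectors = 1000 and an area_list already
holding [0, 200) and [500, 700); none of these values come from the
patch itself):

/*
 * gennvm_get_area(dev, &begin_sect, 300):
 *   next = [0, 200):   0 + 300 > 0,      so begin = 200, prev = [0, 200)
 *   next = [500, 700): 200 + 300 <= 500, so break out of the loop
 *   prev != NULL, so [200, 500) is inserted after [0, 200), keeping the
 *   list sorted by begin, and *begin_sect = 200 is returned.
 */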


> +       spin_unlock(&dev->lock);
> +
> +       return 0;
> +}
> +
> +static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
> +{
> +       struct gen_nvm *gn = dev->mp;
> +       struct gennvm_area *area;
> +
> +       spin_lock(&dev->lock);
> +       list_for_each_entry(area, &gn->area_list, list) {
> +               if (area->begin != begin)
> +                       continue;
> +
> +               list_del(&area->list);
> +               spin_unlock(&dev->lock);
> +               kfree(area);
> +               return;
> +       }
> +       spin_unlock(&dev->lock);
> +}
> +
>  static void gennvm_blocks_free(struct nvm_dev *dev)
>  {
>         struct gen_nvm *gn = dev->mp;
> @@ -230,6 +287,7 @@ static int gennvm_register(struct nvm_dev *dev)
>
>         gn->dev = dev;
>         gn->nr_luns = dev->nr_luns;
> +       INIT_LIST_HEAD(&gn->area_list);
>         dev->mp = gn;
>
>         ret = gennvm_luns_init(dev, gn);
> @@ -466,6 +524,10 @@ static struct nvmm_type gennvm = {
>
>         .get_lun                = gennvm_get_lun,
>         .lun_info_print         = gennvm_lun_info_print,
> +
> +       .get_area               = gennvm_get_area,
> +       .put_area               = gennvm_put_area,
> +
>  };
>
>  static int __init gennvm_module_init(void)
> diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
> index 9c24b5b..04d7c23 100644
> --- a/drivers/lightnvm/gennvm.h
> +++ b/drivers/lightnvm/gennvm.h
> @@ -39,8 +39,14 @@ struct gen_nvm {
>
>         int nr_luns;
>         struct gen_lun *luns;
> +       struct list_head area_list;
>  };
>
> +struct gennvm_area {
> +       struct list_head list;
> +       sector_t begin;
> +       sector_t end;   /* end is excluded */
> +};
>  #define gennvm_for_each_lun(bm, lun, i) \
>                 for ((i) = 0, lun = &(bm)->luns[0]; \
>                         (i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
> index e2710da..20afe1c 100644
> --- a/drivers/lightnvm/rrpc.c
> +++ b/drivers/lightnvm/rrpc.c
> @@ -1039,7 +1039,11 @@ static int rrpc_map_init(struct rrpc *rrpc)
>  {
>         struct nvm_dev *dev = rrpc->dev;
>         sector_t i;
> -       int ret;
> +       u64 slba;
> +       int ret, page_size;
> +
> +       page_size = dev->sec_per_pg * dev->sec_size;
> +       slba = rrpc->soffset >> (ilog2(page_size) - 9);
>
>         rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
>         if (!rrpc->trans_map)
> @@ -1062,8 +1066,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
>                 return 0;
>
>         /* Bring up the mapping table from device */
> -       ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
> -                                                       rrpc_l2p_update, rrpc);
> +       ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_pages, rrpc_l2p_update,
> +                                                                       rrpc);

In rrpc_luns_init, rrpc->nr_pages seems to be the target's sector
count, while previously dev->total_pages was used; dev->total_pages is
the NVM device's page count, so I am a little confused here.

>         if (ret) {
>                 pr_err("nvm: rrpc: could not read L2P table.\n");
>                 return -EINVAL;
> @@ -1072,7 +1076,6 @@ static int rrpc_map_init(struct rrpc *rrpc)
>         return 0;
>  }
>
> -
>  /* Minimum pages needed within a lun */
>  #define PAGE_POOL_SIZE 16
>  #define ADDR_POOL_SIZE 64
> @@ -1186,12 +1189,32 @@ err:
>         return -ENOMEM;
>  }
>
> +/* returns 0 on success and stores the beginning address in *begin */
> +static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
> +{
> +       struct nvm_dev *dev = rrpc->dev;
> +       struct nvmm_type *mt = dev->mt;
> +       sector_t size = rrpc->nr_luns * dev->sec_per_lun * dev->sec_size;
> +
> +       size >>= 9;
> +       return mt->get_area(dev, begin, size);
> +}
> +
> +static void rrpc_area_free(struct rrpc *rrpc)
> +{
> +       struct nvm_dev *dev = rrpc->dev;
> +       struct nvmm_type *mt = dev->mt;
> +
> +       mt->put_area(dev, rrpc->soffset);
> +}
> +
>  static void rrpc_free(struct rrpc *rrpc)
>  {
>         rrpc_gc_free(rrpc);
>         rrpc_map_free(rrpc);
>         rrpc_core_free(rrpc);
>         rrpc_luns_free(rrpc);
> +       rrpc_area_free(rrpc);
>
>         kfree(rrpc);
>  }
> @@ -1312,6 +1335,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct
> gendisk *tdisk,
>         struct request_queue *bqueue = dev->q;
>         struct request_queue *tqueue = tdisk->queue;
>         struct rrpc *rrpc;
> +       sector_t soffset;
>         int ret;
>
>         if (!(dev->identity.dom & NVM_RSP_L2P)) {
> @@ -1337,6 +1361,13 @@ static void *rrpc_init(struct nvm_dev *dev,
> struct gendisk *tdisk,
>         /* simple round-robin strategy */
>         atomic_set(&rrpc->next_lun, -1);
>
> +       ret = rrpc_area_init(rrpc, &soffset);
> +       if (ret < 0) {
> +               pr_err("nvm: rrpc: could not initialize area\n");
> +               return ERR_PTR(ret);
> +       }
> +       rrpc->soffset = soffset;
> +
>         ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
>         if (ret) {
>                 pr_err("nvm: rrpc: could not initialize luns\n");
> diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
> index ef13ac7..9380c68 100644
> --- a/drivers/lightnvm/rrpc.h
> +++ b/drivers/lightnvm/rrpc.h
> @@ -97,6 +97,7 @@ struct rrpc {
>         struct nvm_dev *dev;
>         struct gendisk *disk;
>
> +       sector_t soffset; /* logical sector offset */
>         u64 poffset; /* physical page offset */
>         int lun_offset;
>
> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
> index d675011..18f1bb0 100644
> --- a/include/linux/lightnvm.h
> +++ b/include/linux/lightnvm.h
> @@ -351,6 +351,7 @@ struct nvm_dev {
>         char name[DISK_NAME_LEN];
>
>         struct mutex mlock;
> +       spinlock_t lock;
>  };
>
>  static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
> @@ -463,6 +464,9 @@ typedef int (nvmm_erase_blk_fn)(struct nvm_dev *,
> struct nvm_block *,
>  typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
>  typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
>
> +typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
> +typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
> +
>  struct nvmm_type {
>         const char *name;
>         unsigned int version[3];
> @@ -487,6 +491,10 @@ struct nvmm_type {
>
>         /* Statistics */
>         nvmm_lun_info_print_fn *lun_info_print;
> +
> +       nvmm_get_area_fn *get_area;
> +       nvmm_put_area_fn *put_area;
> +
>         struct list_head list;
>  };
>
>
>
>
Matias Bjorling Jan. 27, 2016, 1:26 p.m. UTC | #2
On 01/27/2016 01:47 PM, Wenwei Tao wrote:
> [...]
> 
> In rrpc_luns_init, rrpc->nr_pages seems to be the target's sector
> count, while previously dev->total_pages was used; dev->total_pages is
> the NVM device's page count, so I am a little confused here.
> 

The dev->total_pages is all pages on the media. The rrpc->nr_pages is the
number of pages allocated to rrpc... which should be dev->total_pages
here, as we want to retrieve the full L2P table and then reconstruct
the state. Thanks. Feel free to send an updated patch with the changes.
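
A minimal sketch of the hunk that follows from this reading (my
assumption; the final patch may look different):

	/* Bring up the full device L2P table and let rrpc_l2p_update()
	 * reconstruct only this target's state from it. */
	ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
						rrpc_l2p_update, rrpc);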

Wenwei Tao Jan. 27, 2016, 2:58 p.m. UTC | #3
static int nvm_core_init(struct nvm_dev *dev)
{

...
dev->sec_per_pg = grp->fpg_sz / grp->csecs;
...
/* calculated values */
dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
...

}

static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
{
  ...
  for (i = 0; i < rrpc->nr_luns; i++) {
     ...
    rrpc->nr_pages += dev->sec_per_lun;
    ...
  }
 ...
}

I would prefer rrpc->nr_pages to be the number of pages allocated to
rrpc, but the code above indeed makes me confused about the sector and
page distinction.
I hope I'm not misunderstanding the code.
PS: I'm not an expert on flash; if the confusion is caused by a lack of
knowledge about flash, please let me know.

2016-01-27 21:26 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
> On 01/27/2016 01:47 PM, Wenwei Tao wrote:
>> [...]
>>
>> In rrpc_luns_init, rrpc->nr_pages seems to be the target's sector
>> count, while previously dev->total_pages was used; dev->total_pages is
>> the NVM device's page count, so I am a little confused here.
>
> The dev->total_pages is all pages on the media. The rrpc->nr_pages is the
> number of pages allocated to rrpc... which should be dev->total_pages
> here, as we want to retrieve the full L2P table and then reconstruct
> the state. Thanks. Feel free to send an updated patch with the changes.
>
Matias Bjorling Jan. 27, 2016, 7:46 p.m. UTC | #4
On 01/27/2016 03:58 PM, Wenwei Tao wrote:
> static int nvm_core_init(struct nvm_dev *dev)
> {
>
> ...
> dev->sec_per_pg = grp->fpg_sz / grp->csecs;
> ...
> /* calculated values */
> dev->sec_per_pl = dev->sec_per_pg * dev->nr_planes;
> dev->sec_per_blk = dev->sec_per_pl * dev->pgs_per_blk;
> dev->sec_per_lun = dev->sec_per_blk * dev->blks_per_lun;
> ...
>
> }
>
> static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
> {
>    ...
>    for (i = 0; i < rrpc->nr_luns; i++) {
>       ...
>      rrpc->nr_pages += dev->sec_per_lun;
>      ...
>    }
>   ...
> }
>
> I would prefer rrpc->nr_pages to be the number of pages allocated to
> rrpc, but the code above indeed makes me confused about the sector and
> page distinction.
> I hope I'm not misunderstanding the code.
> PS: I'm not an expert on flash; if the confusion is caused by a lack of
> knowledge about flash, please let me know.

rrpc->nr_pages should properly be rrpc->nr_sects, as it is indeed
confusing that it is called a page when we actually use it for the
number of sectors.

Pages refer to 4-64 KB units, each holding 1-16 sectors.
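
As a made-up example of that geometry (numbers chosen only to
illustrate the ratio, not taken from any device):

	int sec_size   = 4096;               /* 4 KB sectors          */
	int fpg_sz     = 16384;              /* 16 KB flash page      */
	int sec_per_pg = fpg_sz / sec_size;  /* = 4 sectors per page  */
	/* total sectors = total_pages * sec_per_pg * nr_planes, per the
	 * nvm_core_init() calculations quoted earlier in the thread. */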

I have pushed an update to for-next that renames rrpc->nr_pages to 
rrpc->nr_sects. That should hopefully help future readers.

I also updated dev->total_pages to total_secs.
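
A sketch of what the rename amounts to in rrpc_luns_init (hypothetical
hunk; see the for-next tree for the actual commit):

-		rrpc->nr_pages += dev->sec_per_lun;
+		rrpc->nr_sects += dev->sec_per_lun;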

Thanks

Patch

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 33224cb..27a59e8 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -470,6 +470,7 @@  static int nvm_core_init(struct nvm_dev *dev)
 	dev->total_pages = dev->total_blocks * dev->pgs_per_blk;
 	INIT_LIST_HEAD(&dev->online_targets);
 	mutex_init(&dev->mlock);
+	spin_lock_init(&dev->lock);

 	return 0;
 }
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index 7fb725b..6e2685d 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -20,6 +20,63 @@ 

 #include "gennvm.h"

+static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
+							sector_t size)
+{
+	struct gen_nvm *gn = dev->mp;
+	struct gennvm_area *area, *prev;
+	int page_size = dev->sec_size * dev->sec_per_pg;
+	sector_t begin = 0;
+	sector_t max_sectors = (page_size * dev->total_pages) >> 9;
+
+	if (size > max_sectors)
+		return -EINVAL;
+
+	area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+	if (!area)
+		return -ENOMEM;
+
+	spin_lock(&dev->lock);
+	list_for_each_entry(prev, &gn->area_list, list) {
+		if (begin + size > prev->begin) {
+			begin = prev->end;
+			continue;
+		}
+		break;
+	}
+
+	if ((begin + size) > max_sectors) {
+		spin_unlock(&dev->lock);
+		kfree(area);
+		return -EINVAL;
+	}
+
+	area->begin = *begin_sect = begin;
+	area->end = begin + size;
+	list_add(&area->list, &prev->list);
+	spin_unlock(&dev->lock);
+
+	return 0;
+}
+
+static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
+{
+	struct gen_nvm *gn = dev->mp;
+	struct gennvm_area *area;
+
+	spin_lock(&dev->lock);
+	list_for_each_entry(area, &gn->area_list, list) {
+		if (area->begin != begin)
+			continue;
+
+		list_del(&area->list);
+		spin_unlock(&dev->lock);
+		kfree(area);
+		return;
+	}
+	spin_unlock(&dev->lock);
+}
+
 static void gennvm_blocks_free(struct nvm_dev *dev)
 {
 	struct gen_nvm *gn = dev->mp;
@@ -230,6 +287,7 @@  static int gennvm_register(struct nvm_dev *dev)

 	gn->dev = dev;
 	gn->nr_luns = dev->nr_luns;
+	INIT_LIST_HEAD(&gn->area_list);
 	dev->mp = gn;

 	ret = gennvm_luns_init(dev, gn);
@@ -466,6 +524,10 @@  static struct nvmm_type gennvm = {

 	.get_lun		= gennvm_get_lun,
 	.lun_info_print		= gennvm_lun_info_print,
+
+	.get_area		= gennvm_get_area,
+	.put_area		= gennvm_put_area,
+
 };

 static int __init gennvm_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 9c24b5b..04d7c23 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -39,8 +39,14 @@  struct gen_nvm {

 	int nr_luns;
 	struct gen_lun *luns;
+	struct list_head area_list;
 };

+struct gennvm_area {
+	struct list_head list;
+	sector_t begin;
+	sector_t end;	/* end is excluded */
+};
 #define gennvm_for_each_lun(bm, lun, i) \
 		for ((i) = 0, lun = &(bm)->luns[0]; \
 			(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index e2710da..20afe1c 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -1039,7 +1039,11 @@  static int rrpc_map_init(struct rrpc *rrpc)
 {
 	struct nvm_dev *dev = rrpc->dev;
 	sector_t i;
-	int ret;
+	u64 slba;
+	int ret, page_size;
+
+	page_size = dev->sec_per_pg * dev->sec_size;
+	slba = rrpc->soffset >> (ilog2(page_size) - 9);

 	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_pages);
 	if (!rrpc->trans_map)
@@ -1062,8 +1066,8 @@  static int rrpc_map_init(struct rrpc *rrpc)
 		return 0;

 	/* Bring up the mapping table from device */
-	ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_pages,
-							rrpc_l2p_update, rrpc);
+	ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_pages, rrpc_l2p_update,
+									rrpc);
 	if (ret) {
 		pr_err("nvm: rrpc: could not read L2P table.\n");
 		return -EINVAL;
@@ -1072,7 +1076,6 @@  static int rrpc_map_init(struct rrpc *rrpc)
 	return 0;
 }

-
 /* Minimum pages needed within a lun */
 #define PAGE_POOL_SIZE 16
 #define ADDR_POOL_SIZE 64
@@ -1186,12 +1189,32 @@  err:
 	return -ENOMEM;
 }

+/* returns 0 on success and stores the beginning address in *begin */
+static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
+{
+	struct nvm_dev *dev = rrpc->dev;
+	struct nvmm_type *mt = dev->mt;
+	sector_t size = rrpc->nr_luns * dev->sec_per_lun * dev->sec_size;
+
+	size >>= 9;
+	return mt->get_area(dev, begin, size);
+}
+
+static void rrpc_area_free(struct rrpc *rrpc)
+{
+	struct nvm_dev *dev = rrpc->dev;
+	struct nvmm_type *mt = dev->mt;
+
+	mt->put_area(dev, rrpc->soffset);
+}
+
 static void rrpc_free(struct rrpc *rrpc)
 {
 	rrpc_gc_free(rrpc);
 	rrpc_map_free(rrpc);
 	rrpc_core_free(rrpc);
 	rrpc_luns_free(rrpc);
+	rrpc_area_free(rrpc);

 	kfree(rrpc);
 }
@@ -1312,6 +1335,7 @@  static void *rrpc_init(struct nvm_dev *dev, struct
gendisk *tdisk,
 	struct request_queue *bqueue = dev->q;
 	struct request_queue *tqueue = tdisk->queue;
 	struct rrpc *rrpc;
+	sector_t soffset;
 	int ret;

 	if (!(dev->identity.dom & NVM_RSP_L2P)) {
@@ -1337,6 +1361,13 @@  static void *rrpc_init(struct nvm_dev *dev,
struct gendisk *tdisk,
 	/* simple round-robin strategy */
 	atomic_set(&rrpc->next_lun, -1);

+	ret = rrpc_area_init(rrpc, &soffset);
+	if (ret < 0) {
+		pr_err("nvm: rrpc: could not initialize area\n");
+		return ERR_PTR(ret);
+	}
+	rrpc->soffset = soffset;
+
 	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
 	if (ret) {
 		pr_err("nvm: rrpc: could not initialize luns\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index ef13ac7..9380c68 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -97,6 +97,7 @@  struct rrpc {
 	struct nvm_dev *dev;
 	struct gendisk *disk;

+	sector_t soffset; /* logical sector offset */
 	u64 poffset; /* physical page offset */
 	int lun_offset;

diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d675011..18f1bb0 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -351,6 +351,7 @@  struct nvm_dev {
 	char name[DISK_NAME_LEN];

 	struct mutex mlock;
+	spinlock_t lock;
 };

 static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
@@ -463,6 +464,9 @@  typedef int (nvmm_erase_blk_fn)(struct nvm_dev *,
struct nvm_block *,
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
 typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);

+typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
+typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
+
 struct nvmm_type {
 	const char *name;