LKML Archive mirror
* [PATCH v3 1/3] lightnvm: specify target's logical address area
From: Wenwei Tao @ 2016-02-04 11:34 UTC
  To: mb; +Cc: linux-kernel, linux-block

We can create more than one target on a lightnvm
device by specifying its begin lun and end lun.

But specifying the physical address area alone is not
enough; we also need to carve out a corresponding,
non-intersecting logical address area from the backend
device's logical address space. Otherwise the targets
on the device might use the same logical addresses and
corrupt the information in the device's l2p table.
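
As an illustration, here is a minimal stand-alone model of the
first-fit allocation this patch performs (hypothetical helper
names; the in-kernel version below walks gn->area_list under
dev->lock):

	/* Sketch: first-fit allocation over areas sorted by begin. */
	#include <stdio.h>

	struct area { unsigned long begin, end; /* end is excluded */ };

	static int get_area(const struct area *areas, int nr,
			    unsigned long max, unsigned long size,
			    unsigned long *begin)
	{
		unsigned long b = 0;
		int i;

		for (i = 0; i < nr; i++) {
			if (b + size > areas[i].begin)
				b = areas[i].end; /* gap too small, skip past it */
			else
				break;		  /* first fitting gap found */
		}
		if (b + size > max)
			return -1;
		*begin = b;
		return 0;
	}

	int main(void)
	{
		const struct area areas[] = { { 0, 100 }, { 100, 300 } };
		unsigned long begin;

		/* 1000 sectors total: a 50-sector area lands at 300. */
		if (!get_area(areas, 2, 1000, 50, &begin))
			printf("area begins at %lu\n", begin);
		return 0;
	}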

Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
---
Changes since v2:
-rebase on for-next branch
-make the list increase by area->begin

Changes since v1:
-rename some variables
-add parentheses for clarity
-make gennvm_get_area return int, and add one more sector_t* parameter
to pass the begin sector of the corresponding target
-rebase to v4.5-rc1

 drivers/lightnvm/core.c   |  1 +
 drivers/lightnvm/gennvm.c | 65 +++++++++++++++++++++++++++++++++++++++++++++++
 drivers/lightnvm/gennvm.h |  6 +++++
 drivers/lightnvm/rrpc.c   | 36 +++++++++++++++++++++++---
 drivers/lightnvm/rrpc.h   |  1 +
 include/linux/lightnvm.h  |  8 ++++++
 6 files changed, 114 insertions(+), 3 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 5471cc5..93c035b 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -466,6 +466,7 @@ static int nvm_core_init(struct nvm_dev *dev)
 	dev->total_secs = dev->nr_luns * dev->sec_per_lun;
 	INIT_LIST_HEAD(&dev->online_targets);
 	mutex_init(&dev->mlock);
+	spin_lock_init(&dev->lock);
 
 	return 0;
 }
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index d65ec36..fba3fbd 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -20,6 +20,66 @@
 
 #include "gennvm.h"
 
+static int gennvm_get_area(struct nvm_dev *dev, sector_t *begin_sect,
+							sector_t size)
+{
+	struct gen_nvm *gn = dev->mp;
+	struct gennvm_area *area, *prev, *next;
+	sector_t begin = 0;
+	sector_t max_sectors = (dev->sec_size * dev->total_secs) >> 9;
+
+	if (size > max_sectors)
+		return -EINVAL;
+	area = kmalloc(sizeof(struct gennvm_area), GFP_KERNEL);
+	if (!area)
+		return -ENOMEM;
+
+	prev = NULL;
+
+	spin_lock(&dev->lock);
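+	/* area_list is kept sorted by begin; find the first gap that fits */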
+	list_for_each_entry(next, &gn->area_list, list) {
+		if (begin + size > next->begin) {
+			begin = next->end;
+			prev = next;
+			continue;
+		}
+		break;
+	}
+
+	if ((begin + size) > max_sectors) {
+		spin_unlock(&dev->lock);
+		kfree(area);
+		return -EINVAL;
+	}
+
+	area->begin = *begin_sect = begin;
+	area->end = begin + size;
+	if (prev)
+		list_add(&area->list, &prev->list);
+	else
+		list_add(&area->list, &gn->area_list);
+	spin_unlock(&dev->lock);
+	return 0;
+}
+
+static void gennvm_put_area(struct nvm_dev *dev, sector_t begin)
+{
+	struct gen_nvm *gn = dev->mp;
+	struct gennvm_area *area;
+
+	spin_lock(&dev->lock);
+	list_for_each_entry(area, &gn->area_list, list) {
+		if (area->begin != begin)
+			continue;
+
+		list_del(&area->list);
+		spin_unlock(&dev->lock);
+		kfree(area);
+		return;
+	}
+	spin_unlock(&dev->lock);
+}
+
 static void gennvm_blocks_free(struct nvm_dev *dev)
 {
 	struct gen_nvm *gn = dev->mp;
@@ -229,6 +289,7 @@ static int gennvm_register(struct nvm_dev *dev)
 
 	gn->dev = dev;
 	gn->nr_luns = dev->nr_luns;
+	INIT_LIST_HEAD(&gn->area_list);
 	dev->mp = gn;
 
 	ret = gennvm_luns_init(dev, gn);
@@ -465,6 +526,10 @@ static struct nvmm_type gennvm = {
 
 	.get_lun		= gennvm_get_lun,
 	.lun_info_print		= gennvm_lun_info_print,
+
+	.get_area		= gennvm_get_area,
+	.put_area		= gennvm_put_area,
+
 };
 
 static int __init gennvm_module_init(void)
diff --git a/drivers/lightnvm/gennvm.h b/drivers/lightnvm/gennvm.h
index 9c24b5b..04d7c23 100644
--- a/drivers/lightnvm/gennvm.h
+++ b/drivers/lightnvm/gennvm.h
@@ -39,8 +39,14 @@ struct gen_nvm {
 
 	int nr_luns;
 	struct gen_lun *luns;
+	struct list_head area_list;
 };
 
+struct gennvm_area {
+	struct list_head list;
+	sector_t begin;
+	sector_t end;	/* end is excluded */
+};
 #define gennvm_for_each_lun(bm, lun, i) \
 		for ((i) = 0, lun = &(bm)->luns[0]; \
 			(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index c4d0b04..6ce5f73 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -1038,8 +1038,11 @@ static int rrpc_map_init(struct rrpc *rrpc)
 {
 	struct nvm_dev *dev = rrpc->dev;
 	sector_t i;
+	u64 slba;
 	int ret;
 
+	/* convert the 512B sector offset into a device-sector lba */
+	slba = rrpc->soffset >> (ilog2(dev->sec_size) - 9);
+
 	rrpc->trans_map = vzalloc(sizeof(struct rrpc_addr) * rrpc->nr_sects);
 	if (!rrpc->trans_map)
 		return -ENOMEM;
@@ -1061,8 +1064,8 @@ static int rrpc_map_init(struct rrpc *rrpc)
 		return 0;
 
 	/* Bring up the mapping table from device */
-	ret = dev->ops->get_l2p_tbl(dev, 0, dev->total_secs, rrpc_l2p_update,
-									rrpc);
+	ret = dev->ops->get_l2p_tbl(dev, slba, rrpc->nr_sects,
+			rrpc_l2p_update, rrpc);
 	if (ret) {
 		pr_err("nvm: rrpc: could not read L2P table.\n");
 		return -EINVAL;
@@ -1071,7 +1074,6 @@ static int rrpc_map_init(struct rrpc *rrpc)
 	return 0;
 }
 
-
 /* Minimum pages needed within a lun */
 #define PAGE_POOL_SIZE 16
 #define ADDR_POOL_SIZE 64
@@ -1185,12 +1187,32 @@ err:
 	return -ENOMEM;
 }
 
+/* returns 0 on success and stores the beginning address in *begin */
+static int rrpc_area_init(struct rrpc *rrpc, sector_t *begin)
+{
+	struct nvm_dev *dev = rrpc->dev;
+	struct nvmm_type *mt = dev->mt;
+	sector_t size = rrpc->nr_sects * dev->sec_size;
+
+	size >>= 9;
+	return mt->get_area(dev, begin, size);
+}
+
+static void rrpc_area_free(struct rrpc *rrpc)
+{
+	struct nvm_dev *dev = rrpc->dev;
+	struct nvmm_type *mt = dev->mt;
+
+	mt->put_area(dev, rrpc->soffset);
+}
+
 static void rrpc_free(struct rrpc *rrpc)
 {
 	rrpc_gc_free(rrpc);
 	rrpc_map_free(rrpc);
 	rrpc_core_free(rrpc);
 	rrpc_luns_free(rrpc);
+	rrpc_area_free(rrpc);
 
 	kfree(rrpc);
 }
@@ -1311,6 +1333,7 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
 	struct request_queue *bqueue = dev->q;
 	struct request_queue *tqueue = tdisk->queue;
 	struct rrpc *rrpc;
+	sector_t soffset;
 	int ret;
 
 	if (!(dev->identity.dom & NVM_RSP_L2P)) {
@@ -1336,6 +1359,13 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
 	/* simple round-robin strategy */
 	atomic_set(&rrpc->next_lun, -1);
 
+	ret = rrpc_area_init(rrpc, &soffset);
+	if (ret < 0) {
+		pr_err("nvm: rrpc: could not initialize area\n");
+		return ERR_PTR(ret);
+	}
+	rrpc->soffset = soffset;
+
 	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
 	if (ret) {
 		pr_err("nvm: rrpc: could not initialize luns\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index dfca5c4..6148b14 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -97,6 +97,7 @@ struct rrpc {
 	struct nvm_dev *dev;
 	struct gendisk *disk;
 
+	sector_t soffset; /* logical sector offset */
 	u64 poffset; /* physical page offset */
 	int lun_offset;
 
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index b94f2d5..ce58ad5 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -351,6 +351,7 @@ struct nvm_dev {
 	char name[DISK_NAME_LEN];
 
 	struct mutex mlock;
+	spinlock_t lock;
 };
 
 static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
@@ -463,6 +464,9 @@ typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
 typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
 
+typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
+typedef void (nvmm_put_area_fn)(struct nvm_dev *, sector_t);
+
 struct nvmm_type {
 	const char *name;
 	unsigned int version[3];
@@ -487,6 +491,10 @@ struct nvmm_type {
 
 	/* Statistics */
 	nvmm_lun_info_print_fn *lun_info_print;
+
+	nvmm_get_area_fn *get_area;
+	nvmm_put_area_fn *put_area;
+
 	struct list_head list;
 };
 
-- 
1.8.3.1


* [PATCH 2/3] lightnvm: add a bitmap of luns
From: Wenwei Tao @ 2016-02-04 11:34 UTC
  To: mb; +Cc: linux-kernel, linux-block

Add a bitmap of luns to indicate each lun's status:
in use or available. When creating targets, do the
necessary checks to avoid allocating luns that are
already in use.
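
As a stand-alone sketch of the reserve/release semantics (the
kernel side uses the atomic test_and_set_bit() and
test_and_clear_bit() helpers on dev->lun_map instead):

	#include <assert.h>
	#include <limits.h>

	#define NR_LUNS		128
	#define BITS_PER_WORD	(sizeof(unsigned long) * CHAR_BIT)

	static unsigned long lun_map[NR_LUNS / BITS_PER_WORD];

	/* Returns nonzero if the lun was already taken, 0 on success. */
	static int reserve_lun(int lunid)
	{
		unsigned long mask = 1UL << (lunid % BITS_PER_WORD);
		unsigned long *w = &lun_map[lunid / BITS_PER_WORD];
		int was_set = !!(*w & mask);

		*w |= mask;
		return was_set;
	}

	static void release_lun(int lunid)
	{
		unsigned long mask = 1UL << (lunid % BITS_PER_WORD);

		lun_map[lunid / BITS_PER_WORD] &= ~mask;
	}

	int main(void)
	{
		assert(reserve_lun(5) == 0);	/* first claim succeeds */
		assert(reserve_lun(5) != 0);	/* second claim is refused */
		release_lun(5);
		assert(reserve_lun(5) == 0);	/* free again after release */
		return 0;
	}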

Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
---
 drivers/lightnvm/core.c   |  5 ++++
 drivers/lightnvm/gennvm.c | 18 +++++++++++++++
 drivers/lightnvm/rrpc.c   | 59 +++++++++++++++++++++++++++++++++++------------
 include/linux/lightnvm.h  |  5 ++++
 4 files changed, 72 insertions(+), 15 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 93c035b..11b8e2d 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -464,6 +464,10 @@ static int nvm_core_init(struct nvm_dev *dev)
 	dev->nr_luns = dev->luns_per_chnl * dev->nr_chnls;
 
 	dev->total_secs = dev->nr_luns * dev->sec_per_lun;
+	dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
+		sizeof(unsigned long), GFP_KERNEL);
+	if (!dev->lun_map)
+		return -ENOMEM;
 	INIT_LIST_HEAD(&dev->online_targets);
 	mutex_init(&dev->mlock);
 	spin_lock_init(&dev->lock);
@@ -606,6 +610,7 @@ void nvm_unregister(char *disk_name)
 	up_write(&nvm_lock);
 
 	nvm_exit(dev);
+	kfree(dev->lun_map);
 	kfree(dev);
 }
 EXPORT_SYMBOL(nvm_unregister);
diff --git a/drivers/lightnvm/gennvm.c b/drivers/lightnvm/gennvm.c
index fba3fbd..adc10c2 100644
--- a/drivers/lightnvm/gennvm.c
+++ b/drivers/lightnvm/gennvm.c
@@ -190,6 +190,9 @@ static int gennvm_block_map(u64 slba, u32 nlb, __le64 *entries, void *private)
 		lun_id = div_u64(pba, dev->sec_per_lun);
 		lun = &gn->luns[lun_id];
 
+		/* mark luns that already hold mapped data as in use */
+		if (!test_bit(lun_id, dev->lun_map))
+			__set_bit(lun_id, dev->lun_map);
+
 		/* Calculate block offset into lun */
 		pba = pba - (dev->sec_per_lun * lun_id);
 		blk = &lun->vlun.blocks[div_u64(pba, dev->sec_per_blk)];
@@ -480,10 +483,23 @@ static int gennvm_erase_blk(struct nvm_dev *dev, struct nvm_block *blk,
 	return nvm_erase_ppa(dev, &addr, 1);
 }
 
+static int gennvm_reserve_lun(struct nvm_dev *dev, int lunid)
+{
+	return test_and_set_bit(lunid, dev->lun_map);
+}
+
+static void gennvm_release_lun(struct nvm_dev *dev, int lunid)
+{
+	WARN_ON(!test_and_clear_bit(lunid, dev->lun_map));
+}
+
 static struct nvm_lun *gennvm_get_lun(struct nvm_dev *dev, int lunid)
 {
 	struct gen_nvm *gn = dev->mp;
 
+	if (unlikely(lunid >= dev->nr_luns))
+		return NULL;
+
 	return &gn->luns[lunid].vlun;
 }
 
@@ -525,6 +541,8 @@ static struct nvmm_type gennvm = {
 	.erase_blk		= gennvm_erase_blk,
 
 	.get_lun		= gennvm_get_lun,
+	.reserve_lun		= gennvm_reserve_lun,
+	.release_lun		= gennvm_release_lun,
 	.lun_info_print		= gennvm_lun_info_print,
 
 	.get_area		= gennvm_get_area,
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 6ce5f73..2bd5789 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -1128,6 +1128,22 @@ static void rrpc_core_free(struct rrpc *rrpc)
 
 static void rrpc_luns_free(struct rrpc *rrpc)
 {
+	struct nvm_dev *dev = rrpc->dev;
+	struct nvm_lun *lun;
+	struct rrpc_lun *rlun;
+	int i;
+
+	if (!rrpc->luns)
+		return;
+
+	for (i = 0; i < rrpc->nr_luns; i++) {
+		rlun = &rrpc->luns[i];
+		lun = rlun->parent;
+		if (!lun)
+			break;
+		dev->mt->release_lun(dev, lun->id);
+		vfree(rlun->blocks);
+	}
 	kfree(rrpc->luns);
 }
 
@@ -1135,7 +1151,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 {
 	struct nvm_dev *dev = rrpc->dev;
 	struct rrpc_lun *rlun;
-	int i, j;
+	int i, j, ret = -EINVAL;
 
 	if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
 		pr_err("rrpc: number of pages per block too high.");
@@ -1151,25 +1167,26 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 
 	/* 1:1 mapping */
 	for (i = 0; i < rrpc->nr_luns; i++) {
-		struct nvm_lun *lun = dev->mt->get_lun(dev, lun_begin + i);
+		int lunid = lun_begin + i;
+		struct nvm_lun *lun;
 
-		rlun = &rrpc->luns[i];
-		rlun->rrpc = rrpc;
-		rlun->parent = lun;
-		INIT_LIST_HEAD(&rlun->prio_list);
-		INIT_LIST_HEAD(&rlun->open_list);
-		INIT_LIST_HEAD(&rlun->closed_list);
-
-		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
-		spin_lock_init(&rlun->lock);
+		if (dev->mt->reserve_lun(dev, lunid)) {
+			pr_err("rrpc: lun %u is already allocated\n", lunid);
+			goto err;
+		}
 
-		rrpc->total_blocks += dev->blks_per_lun;
-		rrpc->nr_sects += dev->sec_per_lun;
+		lun = dev->mt->get_lun(dev, lunid);
+		if (!lun)
+			goto err;
 
+		rlun = &rrpc->luns[i];
+		rlun->parent = lun;
 		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
 						rrpc->dev->blks_per_lun);
-		if (!rlun->blocks)
+		if (!rlun->blocks) {
+			ret = -ENOMEM;
 			goto err;
+		}
 
 		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
 			struct rrpc_block *rblk = &rlun->blocks[j];
@@ -1180,11 +1197,23 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 			INIT_LIST_HEAD(&rblk->prio);
 			spin_lock_init(&rblk->lock);
 		}
+
+		rlun->rrpc = rrpc;
+		INIT_LIST_HEAD(&rlun->prio_list);
+		INIT_LIST_HEAD(&rlun->open_list);
+		INIT_LIST_HEAD(&rlun->closed_list);
+
+		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
+		spin_lock_init(&rlun->lock);
+
+		rrpc->total_blocks += dev->blks_per_lun;
+		rrpc->nr_sects += dev->sec_per_lun;
+
 	}
 
 	return 0;
 err:
-	return -ENOMEM;
+	return ret;
 }
 
 /* returns 0 on success and stores the beginning address in *begin */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index ce58ad5..2a17dc1 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -342,6 +342,7 @@ struct nvm_dev {
 	int nr_luns;
 	unsigned max_pages_per_blk;
 
+	unsigned long *lun_map;
 	void *ppalist_pool;
 
 	struct nvm_id identity;
@@ -462,6 +463,8 @@ typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
 typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
 								unsigned long);
 typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
+typedef int (nvmm_reserve_lun)(struct nvm_dev *, int);
+typedef void (nvmm_release_lun)(struct nvm_dev *, int);
 typedef void (nvmm_lun_info_print_fn)(struct nvm_dev *);
 
 typedef int (nvmm_get_area_fn)(struct nvm_dev *, sector_t *, sector_t);
@@ -488,6 +491,8 @@ struct nvmm_type {
 
 	/* Configuration management */
 	nvmm_get_lun_fn *get_lun;
+	nvmm_reserve_lun *reserve_lun;
+	nvmm_release_lun *release_lun;
 
 	/* Statistics */
 	nvmm_lun_info_print_fn *lun_info_print;
-- 
1.8.3.1


* [PATCH v3 3/3] lightnvm: add non-continuous lun target creation support
From: Wenwei Tao @ 2016-02-05  2:42 UTC
  To: mb; +Cc: linux-kernel, linux-block

When creating a target, we specify the begin lunid and
the end lunid, and get the corresponding continuous
luns from the media manager. If one of those luns is
not free, target creation fails, even when the device
has enough free luns in total.

So add support for creating a target on non-continuous
luns, which improves the backend device's space
utilization.
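
User space could then request a target on scattered luns roughly
as follows (a hypothetical sketch against the uapi additions in
this patch; it assumes the header exposes DISK_NAME_LEN and
NVM_TTYPE_NAME_MAX to user space, and error handling is trimmed):

	#include <fcntl.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/lightnvm.h>

	/* Create an rrpc target "tgt0" on luns 0, 4 and 9 of nvme0n1. */
	int create_on_luns(void)
	{
		struct nvm_ioctl_create c;
		int fd, ret;

		memset(&c, 0, sizeof(c));
		strncpy(c.dev, "nvme0n1", DISK_NAME_LEN - 1);
		strncpy(c.tgtname, "tgt0", DISK_NAME_LEN - 1);
		strncpy(c.tgttype, "rrpc", NVM_TTYPE_NAME_MAX - 1);
		c.conf.type = NVM_CONFIG_TYPE_LIST;
		c.conf.l.nr_luns = 3;
		c.conf.l.lunid[0] = 0;	/* luns need not be adjacent */
		c.conf.l.lunid[1] = 4;
		c.conf.l.lunid[2] = 9;

		fd = open(NVM_CTRL_FILE, O_RDWR);
		if (fd < 0)
			return -1;
		ret = ioctl(fd, NVM_DEV_CREATE, &c);
		close(fd);
		return ret;
	}

A caller could first issue the NVM_DEV_FREE_LUNS ioctl added below
to learn which lunids are still free.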

Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
---
Changes since v2
-rebase on for-next branch
-move luns bitmap to PATCH 2
-remove the logic to dynamically select a lun other than
the one requested
-implement lunid list in the lnvm ioctl interface

Changes since v1
-use NVM_FIXED instead NVM_C_FIXED in gennvm_get_lun
-add target creation flags check
-rebase to v4.5-rc1

 drivers/lightnvm/core.c       | 101 ++++++++++++++------
 drivers/lightnvm/rrpc.c       | 208 ++++++++++++++++++++++++++----------------
 drivers/lightnvm/rrpc.h       |   7 +-
 include/linux/lightnvm.h      |   4 +-
 include/uapi/linux/lightnvm.h |  18 ++++
 5 files changed, 228 insertions(+), 110 deletions(-)

diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 11b8e2d..cbfd575 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -622,7 +622,7 @@ static const struct block_device_operations nvm_fops = {
 static int nvm_create_target(struct nvm_dev *dev,
 						struct nvm_ioctl_create *create)
 {
-	struct nvm_ioctl_create_simple *s = &create->conf.s;
+	struct nvm_ioctl_create_conf *conf = &create->conf;
 	struct request_queue *tqueue;
 	struct gendisk *tdisk;
 	struct nvm_tgt_type *tt;
@@ -671,7 +671,7 @@ static int nvm_create_target(struct nvm_dev *dev,
 	tdisk->fops = &nvm_fops;
 	tdisk->queue = tqueue;
 
-	targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
+	targetdata = tt->init(dev, tdisk, conf);
 	if (IS_ERR(targetdata))
 		goto err_init;
 
@@ -723,7 +723,6 @@ static void nvm_remove_target(struct nvm_target *t)
 static int __nvm_configure_create(struct nvm_ioctl_create *create)
 {
 	struct nvm_dev *dev;
-	struct nvm_ioctl_create_simple *s;
 
 	down_write(&nvm_lock);
 	dev = nvm_find_nvm_dev(create->dev);
@@ -733,17 +732,11 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
 		return -EINVAL;
 	}
 
-	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
+	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE &&
+		create->conf.type != NVM_CONFIG_TYPE_LIST) {
 		pr_err("nvm: config type not valid\n");
 		return -EINVAL;
 	}
-	s = &create->conf.s;
-
-	if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
-		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
-			s->lun_begin, s->lun_end, dev->nr_luns);
-		return -EINVAL;
-	}
 
 	return nvm_create_target(dev, create);
 }
@@ -821,24 +814,29 @@ static int nvm_configure_remove(const char *val)
 
 static int nvm_configure_create(const char *val)
 {
-	struct nvm_ioctl_create create;
+	struct nvm_ioctl_create *create;
 	char opcode;
 	int lun_begin, lun_end, ret;
 
-	ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
-						create.tgtname, create.tgttype,
+	create = kzalloc(sizeof(struct nvm_ioctl_create), GFP_KERNEL);
+	if (!create)
+		return -ENOMEM;
+
+	ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create->dev,
+					create->tgtname, create->tgttype,
 						&lun_begin, &lun_end);
 	if (ret != 6) {
 		pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
+		kfree(create);
 		return -EINVAL;
 	}
 
-	create.flags = 0;
-	create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
-	create.conf.s.lun_begin = lun_begin;
-	create.conf.s.lun_end = lun_end;
+	create->flags = 0;
+	create->conf.type = NVM_CONFIG_TYPE_SIMPLE;
+	create->conf.s.lun_begin = lun_begin;
+	create->conf.s.lun_end = lun_end;
 
-	return __nvm_configure_create(&create);
+	ret = __nvm_configure_create(create);
+	kfree(create);
+	return ret;
 }
 
 
@@ -991,24 +989,30 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
 
 static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
 {
-	struct nvm_ioctl_create create;
+	struct nvm_ioctl_create *create;
+	int ret;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
+	create = kzalloc(sizeof(struct nvm_ioctl_create), GFP_KERNEL);
+	if (!create)
+		return -ENOMEM;
 
-	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
+	if (copy_from_user(create, arg, sizeof(struct nvm_ioctl_create))) {
+		kfree(create);
 		return -EFAULT;
+	}
 
-	create.dev[DISK_NAME_LEN - 1] = '\0';
-	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
-	create.tgtname[DISK_NAME_LEN - 1] = '\0';
+	create->dev[DISK_NAME_LEN - 1] = '\0';
+	create->tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
+	create->tgtname[DISK_NAME_LEN - 1] = '\0';
 
-	if (create.flags != 0) {
+	if (create->flags != 0) {
 		pr_err("nvm: no flags supported\n");
+		kfree(create);
 		return -EINVAL;
 	}
 
-	return __nvm_configure_create(&create);
+	ret = __nvm_configure_create(create);
+	kfree(create);
+	return ret;
 }
 
 static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
@@ -1031,6 +1035,49 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
 	return __nvm_configure_remove(&remove);
 }
 
+static long nvm_ioctl_dev_free_luns(struct file *file, void __user *arg)
+{
+	struct nvm_ioctl_free_luns *free_luns;
+	struct nvm_dev *dev;
+	int lunid = 0;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	free_luns = kzalloc(sizeof(struct nvm_ioctl_free_luns), GFP_KERNEL);
+	if (!free_luns)
+		return -ENOMEM;
+
+	if (copy_from_user(free_luns, arg,
+		sizeof(struct nvm_ioctl_free_luns))) {
+		kfree(free_luns);
+		return -EFAULT;
+	}
+	free_luns->dev[DISK_NAME_LEN - 1] = '\0';
+	down_write(&nvm_lock);
+	dev = nvm_find_nvm_dev(free_luns->dev);
+	up_write(&nvm_lock);
+	if (!dev) {
+		pr_err("nvm: device not found\n");
+		kfree(free_luns);
+		return -EINVAL;
+	}
+
+	free_luns->nr_free_luns = 0;
+	while ((lunid = find_next_zero_bit(dev->lun_map, dev->nr_luns,
+				lunid)) < dev->nr_luns) {
+
+		if (free_luns->nr_free_luns >= NVM_LUNS_MAX) {
+			pr_err("nvm: max %u free luns can be reported.\n",
+							NVM_LUNS_MAX);
+			break;
+		}
+		free_luns->free_lunid[free_luns->nr_free_luns++] = lunid;
+		lunid++;
+	}
+
+	if (copy_to_user(arg, free_luns,
+			sizeof(struct nvm_ioctl_free_luns))) {
+		kfree(free_luns);
+		return -EFAULT;
+	}
+	kfree(free_luns);
+	return 0;
+}
+
 static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
 {
 	info->seqnr = 1;
@@ -1135,6 +1182,8 @@ static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
 		return nvm_ioctl_dev_create(file, argp);
 	case NVM_DEV_REMOVE:
 		return nvm_ioctl_dev_remove(file, argp);
+	case NVM_DEV_FREE_LUNS:
+		return nvm_ioctl_dev_free_luns(file, argp);
 	case NVM_DEV_INIT:
 		return nvm_ioctl_dev_init(file, argp);
 	case NVM_DEV_FACTORY:
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index 2bd5789..88b395d 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -23,28 +23,35 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
 				struct nvm_rq *rqd, unsigned long flags);
 
 #define rrpc_for_each_lun(rrpc, rlun, i) \
-		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
-			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
+	for ((i) = 0, rlun = &(rrpc)->luns[0]; \
+		(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
+
+static inline u64 lun_poffset(struct nvm_lun *lun, struct nvm_dev *dev)
+{
+	return lun->id * dev->sec_per_lun;
+}
 
 static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
 {
 	struct rrpc_block *rblk = a->rblk;
-	unsigned int pg_offset;
+	struct rrpc_lun *rlun = rblk->rlun;
+	u64 pg_offset;
 
-	lockdep_assert_held(&rrpc->rev_lock);
+	lockdep_assert_held(&rlun->rev_lock);
 
 	if (a->addr == ADDR_EMPTY || !rblk)
 		return;
 
 	spin_lock(&rblk->lock);
 
-	div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
+	div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, (u32 *)&pg_offset);
 	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
 	rblk->nr_invalid_pages++;
 
 	spin_unlock(&rblk->lock);
 
-	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
+	pg_offset = lun_poffset(rlun->parent, rrpc->dev);
+	rlun->rev_trans_map[a->addr - pg_offset].addr = ADDR_EMPTY;
 }
 
 static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
@@ -52,14 +59,15 @@ static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
 {
 	sector_t i;
 
-	spin_lock(&rrpc->rev_lock);
 	for (i = slba; i < slba + len; i++) {
 		struct rrpc_addr *gp = &rrpc->trans_map[i];
+		struct rrpc_lun *rlun = gp->rblk->rlun;
 
+		spin_lock(&rlun->rev_lock);
 		rrpc_page_invalidate(rrpc, gp);
+		spin_unlock(&rlun->rev_lock);
 		gp->rblk = NULL;
 	}
-	spin_unlock(&rrpc->rev_lock);
 }
 
 static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
@@ -281,13 +289,14 @@ static void rrpc_end_sync_bio(struct bio *bio)
 static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
 	struct request_queue *q = rrpc->dev->q;
+	struct rrpc_lun *rlun = rblk->rlun;
 	struct rrpc_rev_addr *rev;
 	struct nvm_rq *rqd;
 	struct bio *bio;
 	struct page *page;
 	int slot;
 	int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
-	u64 phys_addr;
+	u64 phys_addr, poffset;
 	DECLARE_COMPLETION_ONSTACK(wait);
 
 	if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
@@ -303,6 +312,7 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
 	if (!page)
 		return -ENOMEM;
 
+	poffset = lun_poffset(rlun->parent, rrpc->dev);
 	while ((slot = find_first_zero_bit(rblk->invalid_pages,
 					    nr_pgs_per_blk)) < nr_pgs_per_blk) {
 
@@ -310,23 +320,23 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
 		phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
 
 try:
-		spin_lock(&rrpc->rev_lock);
+		spin_lock(&rlun->rev_lock);
 		/* Get logical address from physical to logical table */
-		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
+		rev = &rlun->rev_trans_map[phys_addr - poffset];
 		/* already updated by previous regular write */
 		if (rev->addr == ADDR_EMPTY) {
-			spin_unlock(&rrpc->rev_lock);
+			spin_unlock(&rlun->rev_lock);
 			continue;
 		}
 
 		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
 		if (IS_ERR_OR_NULL(rqd)) {
-			spin_unlock(&rrpc->rev_lock);
+			spin_unlock(&rlun->rev_lock);
 			schedule();
 			goto try;
 		}
 
-		spin_unlock(&rrpc->rev_lock);
+		spin_unlock(&rlun->rev_lock);
 
 		/* Perform read to do GC */
 		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
@@ -395,7 +405,7 @@ static void rrpc_block_gc(struct work_struct *work)
 	struct rrpc_block *rblk = gcb->rblk;
 	struct nvm_dev *dev = rrpc->dev;
 	struct nvm_lun *lun = rblk->parent->lun;
-	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
+	struct rrpc_lun *rlun = lun->private;
 
 	mempool_free(gcb, rrpc->gcb_pool);
 	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
@@ -498,7 +508,7 @@ static void rrpc_gc_queue(struct work_struct *work)
 	struct rrpc_block *rblk = gcb->rblk;
 	struct nvm_lun *lun = rblk->parent->lun;
 	struct nvm_block *blk = rblk->parent;
-	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
+	struct rrpc_lun *rlun = lun->private;
 
 	spin_lock(&rlun->lock);
 	list_add_tail(&rblk->prio, &rlun->prio_list);
@@ -549,22 +559,24 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
 static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
 					struct rrpc_block *rblk, u64 paddr)
 {
+	struct rrpc_lun *rlun = rblk->rlun;
 	struct rrpc_addr *gp;
 	struct rrpc_rev_addr *rev;
+	u64 poffset = lun_poffset(rlun->parent, rrpc->dev);
 
 	BUG_ON(laddr >= rrpc->nr_sects);
 
 	gp = &rrpc->trans_map[laddr];
-	spin_lock(&rrpc->rev_lock);
+	spin_lock(&rlun->rev_lock);
 	if (gp->rblk)
 		rrpc_page_invalidate(rrpc, gp);
 
 	gp->addr = paddr;
 	gp->rblk = rblk;
 
-	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
+	rev = &rlun->rev_trans_map[gp->addr - poffset];
 	rev->addr = laddr;
-	spin_unlock(&rrpc->rev_lock);
+	spin_unlock(&rlun->rev_lock);
 
 	return gp;
 }
@@ -953,8 +965,6 @@ static void rrpc_requeue(struct work_struct *work)
 
 static void rrpc_gc_free(struct rrpc *rrpc)
 {
-	struct rrpc_lun *rlun;
-	int i;
 
 	if (rrpc->krqd_wq)
 		destroy_workqueue(rrpc->krqd_wq);
@@ -962,16 +972,6 @@ static void rrpc_gc_free(struct rrpc *rrpc)
 	if (rrpc->kgc_wq)
 		destroy_workqueue(rrpc->kgc_wq);
 
-	if (!rrpc->luns)
-		return;
-
-	for (i = 0; i < rrpc->nr_luns; i++) {
-		rlun = &rrpc->luns[i];
-
-		if (!rlun->blocks)
-			break;
-		vfree(rlun->blocks);
-	}
 }
 
 static int rrpc_gc_init(struct rrpc *rrpc)
@@ -992,7 +992,6 @@ static int rrpc_gc_init(struct rrpc *rrpc)
 
 static void rrpc_map_free(struct rrpc *rrpc)
 {
-	vfree(rrpc->rev_trans_map);
 	vfree(rrpc->trans_map);
 }
 
@@ -1000,8 +999,8 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
 {
 	struct rrpc *rrpc = (struct rrpc *)private;
 	struct nvm_dev *dev = rrpc->dev;
-	struct rrpc_addr *addr = rrpc->trans_map + slba;
-	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
+	struct rrpc_addr *addr;
+	struct rrpc_rev_addr *raddr;
 	u64 elba = slba + nlb;
 	u64 i;
 
@@ -1010,8 +1009,15 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
 		return -EINVAL;
 	}
 
+	/* rebase slba from the device's space into this target's area */
+	slba -= rrpc->soffset >> (ilog2(dev->sec_size) - 9);
+	addr = rrpc->trans_map + slba;
 	for (i = 0; i < nlb; i++) {
+		struct rrpc_lun *rlun;
+		struct nvm_lun *lun;
 		u64 pba = le64_to_cpu(entries[i]);
+		u64 poffset;
+		int lunid;
+
 		/* LNVM treats address-spaces as silos, LBA and PBA are
 		 * equally large and zero-indexed.
 		 */
@@ -1026,9 +1032,16 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
 		 */
 		if (!pba)
 			continue;
+		lunid = div_u64(pba, dev->sec_per_lun);
+		lun = dev->mt->get_lun(dev, lunid);
+		if (unlikely(!lun))
+			return -EINVAL;
+		rlun = lun->private;
+		raddr = rlun->rev_trans_map;
+		poffset = lun_poffset(lun, dev);
 
 		addr[i].addr = pba;
-		raddr[pba].addr = slba + i;
+		raddr[pba - poffset].addr = slba + i;
 	}
 
 	return 0;
@@ -1047,17 +1060,10 @@ static int rrpc_map_init(struct rrpc *rrpc)
 	if (!rrpc->trans_map)
 		return -ENOMEM;
 
-	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
-							* rrpc->nr_sects);
-	if (!rrpc->rev_trans_map)
-		return -ENOMEM;
-
 	for (i = 0; i < rrpc->nr_sects; i++) {
 		struct rrpc_addr *p = &rrpc->trans_map[i];
-		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
 
 		p->addr = ADDR_EMPTY;
-		r->addr = ADDR_EMPTY;
 	}
 
 	if (!dev->ops->get_l2p_tbl)
@@ -1129,8 +1135,8 @@ static void rrpc_core_free(struct rrpc *rrpc)
 static void rrpc_luns_free(struct rrpc *rrpc)
 {
 	struct nvm_dev *dev = rrpc->dev;
-	struct nvm_lun *lun;
 	struct rrpc_lun *rlun;
+	struct nvm_lun *lun;
 	int i;
 
 	if (!rrpc->luns)
@@ -1142,24 +1148,68 @@ static void rrpc_luns_free(struct rrpc *rrpc)
 		if (!lun)
 			break;
 		dev->mt->release_lun(dev, lun->id);
+		vfree(rlun->rev_trans_map);
 		vfree(rlun->blocks);
 	}
 	kfree(rrpc->luns);
+	rrpc->luns = NULL;
 }
+
+static int rrpc_lun_init(struct rrpc *rrpc, struct rrpc_lun *rlun,
+			struct nvm_lun *lun)
+{
+	struct nvm_dev *dev = rrpc->dev;
+	int i;
+
+	rlun->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr) *
+					dev->sec_per_lun);
+	if (!rlun->rev_trans_map)
+		return -ENOMEM;
+
+	for (i = 0; i < dev->sec_per_lun; i++) {
+		struct rrpc_rev_addr *r = &rlun->rev_trans_map[i];
+
+		r->addr = ADDR_EMPTY;
+	}
+	rlun->blocks = vzalloc(sizeof(struct rrpc_block) * dev->blks_per_lun);
+	if (!rlun->blocks) {
+		vfree(rlun->rev_trans_map);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < dev->blks_per_lun; i++) {
+		struct rrpc_block *rblk = &rlun->blocks[i];
+		struct nvm_block *blk = &lun->blocks[i];
+
+		rblk->parent = blk;
+		rblk->rlun = rlun;
+		INIT_LIST_HEAD(&rblk->prio);
+		spin_lock_init(&rblk->lock);
+	}
+
+	rlun->rrpc = rrpc;
+	lun->private = rlun;
+	INIT_LIST_HEAD(&rlun->prio_list);
+	INIT_LIST_HEAD(&rlun->open_list);
+	INIT_LIST_HEAD(&rlun->closed_list);
+	INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
+	spin_lock_init(&rlun->lock);
+	spin_lock_init(&rlun->rev_lock);
+	return 0;
 }
 
-static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
+static int rrpc_luns_init(struct rrpc *rrpc, struct nvm_ioctl_create_conf *conf)
 {
 	struct nvm_dev *dev = rrpc->dev;
 	struct rrpc_lun *rlun;
-	int i, j, ret = -EINVAL;
+	int i, ret = -EINVAL;
 
 	if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
 		pr_err("rrpc: number of pages per block too high.");
 		return -EINVAL;
 	}
 
-	spin_lock_init(&rrpc->rev_lock);
-
 	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
 								GFP_KERNEL);
 	if (!rrpc->luns)
@@ -1167,9 +1217,20 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 
 	/* 1:1 mapping */
 	for (i = 0; i < rrpc->nr_luns; i++) {
-		int lunid = lun_begin + i;
 		struct nvm_lun *lun;
+		int lunid;
 
+		if (conf->type == NVM_CONFIG_TYPE_SIMPLE)
+			lunid = conf->s.lun_begin + i;
+		else if (conf->type == NVM_CONFIG_TYPE_LIST)
+			lunid = conf->l.lunid[i];
+		else
+			goto err;
+		if (lunid >= dev->nr_luns) {
+			pr_err("rrpc: lun out of bound (%u >= %u)\n",
+						lunid, dev->nr_luns);
+			goto err;
+		}
 		if (dev->mt->reserve_lun(dev, lunid)) {
 			pr_err("rrpc: lun %u is already allocated\n", lunid);
 			goto err;
@@ -1181,31 +1242,9 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 
 		rlun = &rrpc->luns[i];
 		rlun->parent = lun;
-		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
-						rrpc->dev->blks_per_lun);
-		if (!rlun->blocks) {
-			ret = -ENOMEM;
+		ret = rrpc_lun_init(rrpc, rlun, lun);
+		if (ret)
 			goto err;
-		}
-
-		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
-			struct rrpc_block *rblk = &rlun->blocks[j];
-			struct nvm_block *blk = &lun->blocks[j];
-
-			rblk->parent = blk;
-			rblk->rlun = rlun;
-			INIT_LIST_HEAD(&rblk->prio);
-			spin_lock_init(&rblk->lock);
-		}
-
-		rlun->rrpc = rrpc;
-		INIT_LIST_HEAD(&rlun->prio_list);
-		INIT_LIST_HEAD(&rlun->open_list);
-		INIT_LIST_HEAD(&rlun->closed_list);
-
-		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
-		spin_lock_init(&rlun->lock);
-
 		rrpc->total_blocks += dev->blks_per_lun;
 		rrpc->nr_sects += dev->sec_per_lun;
 
@@ -1213,6 +1252,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
 
 	return 0;
 err:
+	rrpc_luns_free(rrpc);
 	return ret;
 }
 
@@ -1285,14 +1325,16 @@ static sector_t rrpc_capacity(void *private)
 static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
 {
 	struct nvm_dev *dev = rrpc->dev;
+	struct rrpc_lun *rlun = rblk->rlun;
 	int offset;
 	struct rrpc_addr *laddr;
-	u64 paddr, pladdr;
+	u64 paddr, pladdr, poffset;
 
+	poffset = lun_poffset(rlun->parent, dev);
 	for (offset = 0; offset < dev->pgs_per_blk; offset++) {
 		paddr = block_to_addr(rrpc, rblk) + offset;
 
-		pladdr = rrpc->rev_trans_map[paddr].addr;
+		pladdr = rlun->rev_trans_map[paddr - poffset].addr;
 		if (pladdr == ADDR_EMPTY)
 			continue;
 
@@ -1357,7 +1399,7 @@ err:
 static struct nvm_tgt_type tt_rrpc;
 
 static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
-						int lun_begin, int lun_end)
+		struct nvm_ioctl_create_conf *conf)
 {
 	struct request_queue *bqueue = dev->q;
 	struct request_queue *tqueue = tdisk->queue;
@@ -1383,7 +1425,16 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
 	spin_lock_init(&rrpc->bio_lock);
 	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
 
-	rrpc->nr_luns = lun_end - lun_begin + 1;
+	if (conf->type == NVM_CONFIG_TYPE_SIMPLE) {
+		rrpc->nr_luns = conf->s.lun_end - conf->s.lun_begin + 1;
+	} else if (conf->type == NVM_CONFIG_TYPE_LIST) {
+		rrpc->nr_luns = conf->l.nr_luns;
+	} else {
+		kfree(rrpc);
+		return ERR_PTR(-EINVAL);
+	}
 
 	/* simple round-robin strategy */
 	atomic_set(&rrpc->next_lun, -1);
@@ -1395,15 +1446,12 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
 	}
 	rrpc->soffset = soffset;
 
-	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
+	ret = rrpc_luns_init(rrpc, conf);
 	if (ret) {
 		pr_err("nvm: rrpc: could not initialize luns\n");
 		goto err;
 	}
 
-	rrpc->poffset = dev->sec_per_lun * lun_begin;
-	rrpc->lun_offset = lun_begin;
-
 	ret = rrpc_core_init(rrpc);
 	if (ret) {
 		pr_err("nvm: rrpc: could not initialize core\n");
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index 6148b14..abe9135 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -87,6 +87,10 @@ struct rrpc_lun {
 
 	struct work_struct ws_gc;
 
+	/* store a reverse map for garbage collection */
+	struct rrpc_rev_addr *rev_trans_map;
+	spinlock_t rev_lock;
+
 	spinlock_t lock;
 };
 
@@ -124,9 +128,6 @@ struct rrpc {
 	 * addresses are used when writing to the disk block device.
 	 */
 	struct rrpc_addr *trans_map;
-	/* also store a reverse map for garbage collection */
-	struct rrpc_rev_addr *rev_trans_map;
-	spinlock_t rev_lock;
 
 	struct rrpc_inflight inflights;
 
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 2a17dc1..d2f2632 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -271,6 +271,7 @@ struct nvm_lun {
 	spinlock_t lock;
 
 	struct nvm_block *blocks;
+	void *private;
 };
 
 enum {
@@ -425,7 +426,8 @@ static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
 
 typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
 typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
+typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *,
+			struct nvm_ioctl_create_conf *);
 typedef void (nvm_tgt_exit_fn)(void *);
 
 struct nvm_tgt_type {
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
index 774a431..23ebc0c9 100644
--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h
@@ -35,6 +35,8 @@
 #define NVM_TTYPE_MAX 63
 #define NVM_MMTYPE_LEN 8
 
+#define NVM_LUNS_MAX 1024
+
 #define NVM_CTRL_FILE "/dev/lightnvm/control"
 
 struct nvm_ioctl_info_tgt {
@@ -74,14 +76,21 @@ struct nvm_ioctl_create_simple {
 	__u32 lun_end;
 };
 
+struct nvm_ioctl_create_list {
+	__u32 nr_luns;
+	__u32 lunid[NVM_LUNS_MAX];
+};
+
 enum {
 	NVM_CONFIG_TYPE_SIMPLE = 0,
+	NVM_CONFIG_TYPE_LIST,
 };
 
 struct nvm_ioctl_create_conf {
 	__u32 type;
 	union {
 		struct nvm_ioctl_create_simple s;
+		struct nvm_ioctl_create_list l;
 	};
 };
 
@@ -101,6 +110,12 @@ struct nvm_ioctl_remove {
 	__u32 flags;
 };
 
+struct nvm_ioctl_free_luns {
+	char dev[DISK_NAME_LEN];
+	__u32 nr_free_luns;
+	__u32 free_lunid[NVM_LUNS_MAX];
+};
+
 struct nvm_ioctl_dev_init {
 	char dev[DISK_NAME_LEN];		/* open-channel SSD device */
 	char mmtype[NVM_MMTYPE_LEN];		/* register to media manager */
@@ -131,6 +146,7 @@ enum {
 	/* device level cmds */
 	NVM_DEV_CREATE_CMD,
 	NVM_DEV_REMOVE_CMD,
+	NVM_DEV_FREE_LUNS_CMD,
 
 	/* Init a device to support LightNVM media managers */
 	NVM_DEV_INIT_CMD,
@@ -149,6 +165,8 @@ enum {
 						struct nvm_ioctl_create)
 #define NVM_DEV_REMOVE		_IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \
 						struct nvm_ioctl_remove)
+#define NVM_DEV_FREE_LUNS	_IOW(NVM_IOCTL,	NVM_DEV_FREE_LUNS_CMD, \
+						struct nvm_ioctl_free_luns)
 #define NVM_DEV_INIT		_IOW(NVM_IOCTL, NVM_DEV_INIT_CMD, \
 						struct nvm_ioctl_dev_init)
 #define NVM_DEV_FACTORY		_IOW(NVM_IOCTL, NVM_DEV_FACTORY_CMD, \
-- 
1.8.3.1


* Re: [PATCH v3 1/3] lightnvm: specify target's logical address area
From: Matias Bjørling @ 2016-02-05 11:58 UTC
  To: Wenwei Tao; +Cc: linux-kernel, linux-block

On 02/04/2016 12:34 PM, Wenwei Tao wrote:
> We can create more than one target on a lightnvm
> device by specifying its begin lun and end lun.
>
> [full patch snipped]

Thanks, applied for 4.6. I made some small style changes.


* Re: [PATCH 2/3] lightnvm: add a bitmap of luns
From: Matias Bjørling @ 2016-02-05 11:59 UTC
  To: Wenwei Tao; +Cc: linux-kernel, linux-block

On 02/04/2016 12:34 PM, Wenwei Tao wrote:
> Add a bitmap of luns to indicate each lun's status:
> in use or available. When creating targets, do the
> necessary checks to avoid allocating luns that are
> already in use.
>
> [full patch snipped]
>
Thanks, applied for 4.6. I added an extra kfree(dev->lun_map) at the
end of nvm_core_init to make sure we free the lun_map if other
allocations fail during initialization.
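
The pattern would look roughly like this (a sketch, not the exact
applied hunk; "later_allocation_fails" is a placeholder for
whatever allocation follows lun_map):

	static int nvm_core_init(struct nvm_dev *dev)
	{
		dev->lun_map = kcalloc(BITS_TO_LONGS(dev->nr_luns),
				       sizeof(unsigned long), GFP_KERNEL);
		if (!dev->lun_map)
			return -ENOMEM;

		/* ... remaining initialization ... */
		if (later_allocation_fails)
			goto err_free_lun_map;

		return 0;

	err_free_lun_map:
		kfree(dev->lun_map);
		dev->lun_map = NULL;
		return -ENOMEM;
	}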


* Re: [PATCH 2/3] lightnvm: add a bitmap of luns
From: Wenwei Tao @ 2016-02-05 12:23 UTC
  To: Matias Bjørling; +Cc: linux-kernel, linux-block

Forgot to do that.
Thanks for fixing my mistake.

2016-02-05 19:59 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
> On 02/04/2016 12:34 PM, Wenwei Tao wrote:
>> Add a bitmap of luns to indicate the status
>> of luns: inuse/available. When create targets
>> do the necessary check to avoid allocating luns
>> that are already allocated.
>>
>> Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
>> [...]
> Thanks, applied for 4.6. I added an extra kfree(dev->lun_map) at the end
> of nvm_core_init to make sure that we free the lun_map if other
> allocations fail during initialization.

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/3] lightnvm: add a bitmap of luns
  2016-02-05 12:23     ` Wenwei Tao
@ 2016-02-05 12:24       ` Matias Bjørling
  0 siblings, 0 replies; 9+ messages in thread
From: Matias Bjørling @ 2016-02-05 12:24 UTC (permalink / raw)
  To: Wenwei Tao; +Cc: linux-kernel, linux-block

On 02/05/2016 01:23 PM, Wenwei Tao wrote:
> Forgot to do that.
> Thanks for fixing my mistake.

No worries, you fixed a couple that I introduced ;)

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH v3 3/3] lightnvm: add non-continuous lun target creation support
  2016-02-05  2:42 ` [PATCH v3 3/3] lightnvm: add non-continuous lun target creation support Wenwei Tao
@ 2016-02-05 12:55   ` Matias Bjørling
  2016-02-05 14:19     ` Wenwei Tao
  0 siblings, 1 reply; 9+ messages in thread
From: Matias Bjørling @ 2016-02-05 12:55 UTC (permalink / raw)
  To: Wenwei Tao; +Cc: linux-kernel, linux-block

On 02/05/2016 03:42 AM, Wenwei Tao wrote:
> When creating a target, we specify the begin lunid and
> the end lunid and get the corresponding continuous
> luns from the media manager. If one of those luns is
> not free, target creation fails, even if the device
> has enough free luns in total.
> 
> So add non-continuous lun target creation support;
> this improves the backend device's space utilization.
> 
> Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
> ---
> Changes since v2
> -rebase on for-next branch
> -move luns bitmap to PATCH 2
> -remove the logic to dynamically select another lun than 
> the one requested
> -implement lunid list in the lnvm ioctl interface
> 
> Changes since v1
> -use NVM_FIXED instead NVM_C_FIXED in gennvm_get_lun
> -add target creation flags check
> -rebase to v4.5-rc1
> 
>  drivers/lightnvm/core.c       | 101 ++++++++++++++------
>  drivers/lightnvm/rrpc.c       | 208 ++++++++++++++++++++++++++----------------
>  drivers/lightnvm/rrpc.h       |   7 +-
>  include/linux/lightnvm.h      |   4 +-
>  include/uapi/linux/lightnvm.h |  18 ++++
>  5 files changed, 228 insertions(+), 110 deletions(-)
> 
> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
> index 11b8e2d..cbfd575 100644
> --- a/drivers/lightnvm/core.c
> +++ b/drivers/lightnvm/core.c
> @@ -622,7 +622,7 @@ static const struct block_device_operations nvm_fops = {
>  static int nvm_create_target(struct nvm_dev *dev,
>  						struct nvm_ioctl_create *create)
>  {
> -	struct nvm_ioctl_create_simple *s = &create->conf.s;
> +	struct nvm_ioctl_create_conf *conf = &create->conf;
>  	struct request_queue *tqueue;
>  	struct gendisk *tdisk;
>  	struct nvm_tgt_type *tt;
> @@ -671,7 +671,7 @@ static int nvm_create_target(struct nvm_dev *dev,
>  	tdisk->fops = &nvm_fops;
>  	tdisk->queue = tqueue;
>  
> -	targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
> +	targetdata = tt->init(dev, tdisk, conf);
>  	if (IS_ERR(targetdata))
>  		goto err_init;
>  
> @@ -723,7 +723,6 @@ static void nvm_remove_target(struct nvm_target *t)
>  static int __nvm_configure_create(struct nvm_ioctl_create *create)
>  {
>  	struct nvm_dev *dev;
> -	struct nvm_ioctl_create_simple *s;
>  
>  	down_write(&nvm_lock);
>  	dev = nvm_find_nvm_dev(create->dev);
> @@ -733,17 +732,11 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
>  		return -EINVAL;
>  	}
>  
> -	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
> +	if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE &&
> +		create->conf.type != NVM_CONFIG_TYPE_LIST) {
>  		pr_err("nvm: config type not valid\n");
>  		return -EINVAL;
>  	}
> -	s = &create->conf.s;
> -
> -	if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
> -		pr_err("nvm: lun out of bound (%u:%u > %u)\n",
> -			s->lun_begin, s->lun_end, dev->nr_luns);
> -		return -EINVAL;
> -	}
>  
>  	return nvm_create_target(dev, create);
>  }
> @@ -821,24 +814,29 @@ static int nvm_configure_remove(const char *val)
>  
>  static int nvm_configure_create(const char *val)
>  {
> -	struct nvm_ioctl_create create;
> +	struct nvm_ioctl_create *create;
>  	char opcode;
>  	int lun_begin, lun_end, ret;
>  
> -	ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
> -						create.tgtname, create.tgttype,
> +	create = kzalloc(sizeof(struct nvm_ioctl_create), GFP_KERNEL);
> +	if (!create)
> +		return -ENOMEM;
> +
> +	ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create->dev,
> +					create->tgtname, create->tgttype,
>  						&lun_begin, &lun_end);
>  	if (ret != 6) {
>  		pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
> +		kfree(create);
>  		return -EINVAL;
>  	}
>  
> -	create.flags = 0;
> -	create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
> -	create.conf.s.lun_begin = lun_begin;
> -	create.conf.s.lun_end = lun_end;
> +	create->flags = 0;
> +	create->conf.type = NVM_CONFIG_TYPE_SIMPLE;
> +	create->conf.s.lun_begin = lun_begin;
> +	create->conf.s.lun_end = lun_end;
>  
> -	return __nvm_configure_create(&create);
> +	ret = __nvm_configure_create(create);
> +	kfree(create);
> +	return ret;
>  }
>  
>  
> @@ -991,24 +989,30 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
>  
>  static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
>  {
> -	struct nvm_ioctl_create create;
> +	struct nvm_ioctl_create *create;
> +	long ret;
>  
>  	if (!capable(CAP_SYS_ADMIN))
>  		return -EPERM;
> +	create = kzalloc(sizeof(struct nvm_ioctl_create), GFP_KERNEL);
> +	if (!create)
> +		return -ENOMEM;
>  
> -	if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
> +	if (copy_from_user(create, arg, sizeof(struct nvm_ioctl_create))) {
> +		kfree(create);
>  		return -EFAULT;
> +	}
>  
> -	create.dev[DISK_NAME_LEN - 1] = '\0';
> -	create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
> -	create.tgtname[DISK_NAME_LEN - 1] = '\0';
> +	create->dev[DISK_NAME_LEN - 1] = '\0';
> +	create->tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
> +	create->tgtname[DISK_NAME_LEN - 1] = '\0';
>  
> -	if (create.flags != 0) {
> +	if (create->flags != 0) {
>  		pr_err("nvm: no flags supported\n");
> +		kfree(create);
>  		return -EINVAL;
>  	}
>  
> -	return __nvm_configure_create(&create);
> +	ret = __nvm_configure_create(create);
> +	kfree(create);
> +	return ret;
>  }
>  
>  static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
> @@ -1031,6 +1035,49 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
>  	return __nvm_configure_remove(&remove);
>  }
>  
> +static long nvm_ioctl_dev_free_luns(struct file *file, void __user *arg)
> +{
> +	struct nvm_ioctl_free_luns *free_luns;
> +	struct nvm_dev *dev;
> +	int lunid = 0;
> +
> +	if (!capable(CAP_SYS_ADMIN))
> +		return -EPERM;
> +
> +	free_luns = kzalloc(sizeof(struct nvm_ioctl_free_luns), GFP_KERNEL);
> +	if (!free_luns)
> +		return -ENOMEM;
> +
> +	if (copy_from_user(free_luns, arg,
> +		sizeof(struct nvm_ioctl_free_luns))) {
> +		kfree(free_luns);
> +		return -EFAULT;
> +	}
> +	free_luns->dev[DISK_NAME_LEN - 1] = '\0';
> +	down_write(&nvm_lock);
> +	dev = nvm_find_nvm_dev(free_luns->dev);
> +	up_write(&nvm_lock);
> +	if (!dev) {
> +		pr_err("nvm: device not found\n");
> +		kfree(free_luns);
> +		return -EINVAL;
> +	}
> +
> +	free_luns->nr_free_luns = 0;
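> +	/* clear bits in lun_map denote luns not reserved by any target */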
> +	while ((lunid = find_next_zero_bit(dev->lun_map, dev->nr_luns,
> +				lunid)) < dev->nr_luns) {
> +
> +		if (free_luns->nr_free_luns >= NVM_LUNS_MAX) {
> +			pr_err("nvm: max %u free luns can be reported.\n",
> +							NVM_LUNS_MAX);
> +			break;
> +		}
> +		free_luns->free_lunid[free_luns->nr_free_luns++] = lunid;
> +		lunid++;
> +	}
> +
> +	if (copy_to_user(arg, free_luns,
> +			sizeof(struct nvm_ioctl_free_luns))) {
> +		kfree(free_luns);
> +		return -EFAULT;
> +	}
> +
> +	kfree(free_luns);
> +	return 0;
> +}
> +
>  static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
>  {
>  	info->seqnr = 1;
> @@ -1135,6 +1182,8 @@ static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
>  		return nvm_ioctl_dev_create(file, argp);
>  	case NVM_DEV_REMOVE:
>  		return nvm_ioctl_dev_remove(file, argp);
> +	case NVM_DEV_FREE_LUNS:
> +		return nvm_ioctl_dev_free_luns(file, argp);
>  	case NVM_DEV_INIT:
>  		return nvm_ioctl_dev_init(file, argp);
>  	case NVM_DEV_FACTORY:
> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
> index 2bd5789..88b395d 100644
> --- a/drivers/lightnvm/rrpc.c
> +++ b/drivers/lightnvm/rrpc.c
> @@ -23,28 +23,35 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
>  				struct nvm_rq *rqd, unsigned long flags);
>  
>  #define rrpc_for_each_lun(rrpc, rlun, i) \
> -		for ((i) = 0, rlun = &(rrpc)->luns[0]; \
> -			(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
> +	for ((i) = 0, rlun = &(rrpc)->luns[0]; \
> +		(i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
> +
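> +/* first sector of a lun within the device's flat sector address space */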
> +static inline u64 lun_poffset(struct nvm_lun *lun, struct nvm_dev *dev)
> +{
> +	return lun->id * dev->sec_per_lun;
> +}
>  
>  static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
>  {
>  	struct rrpc_block *rblk = a->rblk;
>  	unsigned int pg_offset;
> +	struct rrpc_lun *rlun = rblk->rlun;
> +	u64 lun_offset;
>  
> -	lockdep_assert_held(&rrpc->rev_lock);
> +	lockdep_assert_held(&rlun->rev_lock);
>  
>  	if (a->addr == ADDR_EMPTY || !rblk)
>  		return;
>  
>  	spin_lock(&rblk->lock);
>  
>  	div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
>  	WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
>  	rblk->nr_invalid_pages++;
>  
>  	spin_unlock(&rblk->lock);
>  
> -	rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
> +	lun_offset = lun_poffset(rlun->parent, rrpc->dev);
> +	rlun->rev_trans_map[a->addr - lun_offset].addr = ADDR_EMPTY;
>  }
>  
>  static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
> @@ -52,14 +59,15 @@ static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
>  {
>  	sector_t i;
>  
> -	spin_lock(&rrpc->rev_lock);
>  	for (i = slba; i < slba + len; i++) {
>  		struct rrpc_addr *gp = &rrpc->trans_map[i];
> +		struct rrpc_lun *rlun = gp->rblk->rlun;
>  
> +		spin_lock(&rlun->rev_lock);
>  		rrpc_page_invalidate(rrpc, gp);
> +		spin_unlock(&rlun->rev_lock);
>  		gp->rblk = NULL;
>  	}
> -	spin_unlock(&rrpc->rev_lock);
>  }
>  
>  static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
> @@ -281,13 +289,14 @@ static void rrpc_end_sync_bio(struct bio *bio)
>  static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
>  {
>  	struct request_queue *q = rrpc->dev->q;
> +	struct rrpc_lun *rlun = rblk->rlun;
>  	struct rrpc_rev_addr *rev;
>  	struct nvm_rq *rqd;
>  	struct bio *bio;
>  	struct page *page;
>  	int slot;
>  	int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
> -	u64 phys_addr;
> +	u64 phys_addr, poffset;
>  	DECLARE_COMPLETION_ONSTACK(wait);
>  
>  	if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
> @@ -303,6 +312,7 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
>  	if (!page)
>  		return -ENOMEM;
>  
> +	poffset = lun_poffset(rlun->parent, rrpc->dev);
>  	while ((slot = find_first_zero_bit(rblk->invalid_pages,
>  					    nr_pgs_per_blk)) < nr_pgs_per_blk) {
>  
> @@ -310,23 +320,23 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
>  		phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
>  
>  try:
> -		spin_lock(&rrpc->rev_lock);
> +		spin_lock(&rlun->rev_lock);
>  		/* Get logical address from physical to logical table */
> -		rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
> +		rev = &rlun->rev_trans_map[phys_addr - poffset];
>  		/* already updated by previous regular write */
>  		if (rev->addr == ADDR_EMPTY) {
> -			spin_unlock(&rrpc->rev_lock);
> +			spin_unlock(&rlun->rev_lock);
>  			continue;
>  		}
>  
>  		rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
>  		if (IS_ERR_OR_NULL(rqd)) {
> -			spin_unlock(&rrpc->rev_lock);
> +			spin_unlock(&rlun->rev_lock);
>  			schedule();
>  			goto try;
>  		}
>  
> -		spin_unlock(&rrpc->rev_lock);
> +		spin_unlock(&rlun->rev_lock);
>  
>  		/* Perform read to do GC */
>  		bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
> @@ -395,7 +405,7 @@ static void rrpc_block_gc(struct work_struct *work)
>  	struct rrpc_block *rblk = gcb->rblk;
>  	struct nvm_dev *dev = rrpc->dev;
>  	struct nvm_lun *lun = rblk->parent->lun;
> -	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
> +	struct rrpc_lun *rlun = lun->private;
>  
>  	mempool_free(gcb, rrpc->gcb_pool);
>  	pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
> @@ -498,7 +508,7 @@ static void rrpc_gc_queue(struct work_struct *work)
>  	struct rrpc_block *rblk = gcb->rblk;
>  	struct nvm_lun *lun = rblk->parent->lun;
>  	struct nvm_block *blk = rblk->parent;
> -	struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
> +	struct rrpc_lun *rlun = lun->private;
>  
>  	spin_lock(&rlun->lock);
>  	list_add_tail(&rblk->prio, &rlun->prio_list);
> @@ -549,22 +559,24 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
>  static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
>  					struct rrpc_block *rblk, u64 paddr)
>  {
> +	struct rrpc_lun *rlun = rblk->rlun;
>  	struct rrpc_addr *gp;
>  	struct rrpc_rev_addr *rev;
> +	u64 poffset = lun_poffset(rlun->parent, rrpc->dev);
>  
>  	BUG_ON(laddr >= rrpc->nr_sects);
>  
>  	gp = &rrpc->trans_map[laddr];
> -	spin_lock(&rrpc->rev_lock);
> +	spin_lock(&rlun->rev_lock);
>  	if (gp->rblk)
>  		rrpc_page_invalidate(rrpc, gp);

Having a new line here would increase readability.

>  
>  	gp->addr = paddr;
>  	gp->rblk = rblk;
>  
> -	rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
> +	rev = &rlun->rev_trans_map[gp->addr - poffset];
>  	rev->addr = laddr;
> -	spin_unlock(&rrpc->rev_lock);
> +	spin_unlock(&rlun->rev_lock);
>  
>  	return gp;
>  }
> @@ -953,8 +965,6 @@ static void rrpc_requeue(struct work_struct *work)
>  
>  static void rrpc_gc_free(struct rrpc *rrpc)
>  {
> -	struct rrpc_lun *rlun;
> -	int i;
>  
>  	if (rrpc->krqd_wq)
>  		destroy_workqueue(rrpc->krqd_wq);
> @@ -962,16 +972,6 @@ static void rrpc_gc_free(struct rrpc *rrpc)
>  	if (rrpc->kgc_wq)
>  		destroy_workqueue(rrpc->kgc_wq);
>  
> -	if (!rrpc->luns)
> -		return;
> -
> -	for (i = 0; i < rrpc->nr_luns; i++) {
> -		rlun = &rrpc->luns[i];
> -
> -		if (!rlun->blocks)
> -			break;
> -		vfree(rlun->blocks);
> -	}
>  }
>  
>  static int rrpc_gc_init(struct rrpc *rrpc)
> @@ -992,7 +992,6 @@ static int rrpc_gc_init(struct rrpc *rrpc)
>  
>  static void rrpc_map_free(struct rrpc *rrpc)
>  {
> -	vfree(rrpc->rev_trans_map);
>  	vfree(rrpc->trans_map);
>  }
>  
> @@ -1000,8 +999,8 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
>  {
>  	struct rrpc *rrpc = (struct rrpc *)private;
>  	struct nvm_dev *dev = rrpc->dev;
> -	struct rrpc_addr *addr = rrpc->trans_map + slba;
> -	struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
> +	struct rrpc_addr *addr;
> +	struct rrpc_rev_addr *raddr;
>  	u64 elba = slba + nlb;
>  	u64 i;
>  
> @@ -1010,8 +1009,15 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
>  		return -EINVAL;
>  	}
>  
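> +	/* rrpc->soffset is in 512-byte sectors; scale it to device sectors */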
> +	slba -= rrpc->soffset >> (ilog2(dev->sec_size) - 9);
> +	addr = rrpc->trans_map + slba;
>  	for (i = 0; i < nlb; i++) {
> +		struct rrpc_lun *rlun;
> +		struct nvm_lun *lun;
>  		u64 pba = le64_to_cpu(entries[i]);
> +		u64 poffset;
> +		int lunid;
> +
>  		/* LNVM treats address-spaces as silos, LBA and PBA are
>  		 * equally large and zero-indexed.
>  		 */
> @@ -1026,9 +1032,16 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
>  		 */
>  		if (!pba)
>  			continue;

A new line here would increase readability.

> +		lunid = div_u64(pba, dev->sec_per_lun);
> +		lun = dev->mt->get_lun(dev, lunid);
> +		if (unlikely(!lun))
> +			return -EINVAL;

Same

> +		rlun = lun->private;
> +		raddr = rlun->rev_trans_map;
> +		poffset = lun_poffset(lun, dev);
>  
>  		addr[i].addr = pba;
> -		raddr[pba].addr = slba + i;
> +		raddr[pba - poffset].addr = slba + i;
>  	}
>  
>  	return 0;
> @@ -1047,17 +1060,10 @@ static int rrpc_map_init(struct rrpc *rrpc)
>  	if (!rrpc->trans_map)
>  		return -ENOMEM;
>  
> -	rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
> -							* rrpc->nr_sects);
> -	if (!rrpc->rev_trans_map)
> -		return -ENOMEM;
> -
>  	for (i = 0; i < rrpc->nr_sects; i++) {
>  		struct rrpc_addr *p = &rrpc->trans_map[i];
> -		struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
>  
>  		p->addr = ADDR_EMPTY;
> -		r->addr = ADDR_EMPTY;
>  	}
>  
>  	if (!dev->ops->get_l2p_tbl)
> @@ -1129,8 +1135,8 @@ static void rrpc_core_free(struct rrpc *rrpc)
>  static void rrpc_luns_free(struct rrpc *rrpc)
>  {
>  	struct nvm_dev *dev = rrpc->dev;
> -	struct nvm_lun *lun;
>  	struct rrpc_lun *rlun;
> +	struct nvm_lun *lun;
>  	int i;
>  
>  	if (!rrpc->luns)
> @@ -1142,24 +1148,68 @@ static void rrpc_luns_free(struct rrpc *rrpc)
>  		if (!lun)
>  			break;
>  		dev->mt->release_lun(dev, lun->id);
> +		vfree(rlun->rev_trans_map);
>  		vfree(rlun->blocks);
>  	}
>  	kfree(rrpc->luns);
> +	rrpc->luns = NULL;
> +

No new line needed here.

> +}
> +
> +static int rrpc_lun_init(struct rrpc *rrpc, struct rrpc_lun *rlun,
> +			struct nvm_lun *lun)
> +{
> +	struct nvm_dev *dev = rrpc->dev;
> +	int i;
> +
> +	rlun->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr) *
> +					dev->sec_per_lun);
> +	if (!rlun->rev_trans_map)
> +		return -ENOMEM;
> +
> +	for (i = 0; i < dev->sec_per_lun; i++) {
> +		struct rrpc_rev_addr *r = &rlun->rev_trans_map[i];
> +
> +		r->addr = ADDR_EMPTY;
> +	}
> +	rlun->blocks = vzalloc(sizeof(struct rrpc_block) * dev->blks_per_lun);
> +	if (!rlun->blocks) {
> +		vfree(rlun->rev_trans_map);
> +		return -ENOMEM;
> +	}
> +
> +	for (i = 0; i < dev->blks_per_lun; i++) {
> +		struct rrpc_block *rblk = &rlun->blocks[i];
> +		struct nvm_block *blk = &lun->blocks[i];
> +
> +		rblk->parent = blk;
> +		rblk->rlun = rlun;
> +		INIT_LIST_HEAD(&rblk->prio);
> +		spin_lock_init(&rblk->lock);
> +	}
> +
> +	rlun->rrpc = rrpc;
> +	lun->private = rlun;
> +	INIT_LIST_HEAD(&rlun->prio_list);
> +	INIT_LIST_HEAD(&rlun->open_list);
> +	INIT_LIST_HEAD(&rlun->closed_list);
> +	INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
> +	spin_lock_init(&rlun->lock);
> +	spin_lock_init(&rlun->rev_lock);

A new line would be great here.

> +	return 0;
>  }
>  
> -static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
> +static int rrpc_luns_init(struct rrpc *rrpc, struct nvm_ioctl_create_conf *conf)
>  {
>  	struct nvm_dev *dev = rrpc->dev;
>  	struct rrpc_lun *rlun;
> -	int i, j, ret = -EINVAL;
> +	int i, ret = -EINVAL;
>  
>  	if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
>  		pr_err("rrpc: number of pages per block too high.");
>  		return -EINVAL;
>  	}
>  
> -	spin_lock_init(&rrpc->rev_lock);
> -
>  	rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
>  								GFP_KERNEL);
>  	if (!rrpc->luns)
> @@ -1167,9 +1217,20 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
>  
>  	/* 1:1 mapping */
>  	for (i = 0; i < rrpc->nr_luns; i++) {
> -		int lunid = lun_begin + i;
>  		struct nvm_lun *lun;
> +		int lunid;
>  
> +		if (conf->type == NVM_CONFIG_TYPE_SIMPLE)
> +			lunid = conf->s.lun_begin + i;
> +		else if (conf->type == NVM_CONFIG_TYPE_LIST)
> +			lunid = conf->l.lunid[i];
> +		else
> +			goto err;

It makes it more readable to insert a blank line here.

> +		if (lunid >= dev->nr_luns) {
> +			pr_err("rrpc: lun out of bound (%u >= %u)\n",
> +						lunid, dev->nr_luns);
> +			goto err;
> +		}

Same

>  		if (dev->mt->reserve_lun(dev, lunid)) {
>  			pr_err("rrpc: lun %u is already allocated\n", lunid);
>  			goto err;
> @@ -1181,31 +1242,9 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
>  
>  		rlun = &rrpc->luns[i];
>  		rlun->parent = lun;
> -		rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
> -						rrpc->dev->blks_per_lun);
> -		if (!rlun->blocks) {
> -			ret = -ENOMEM;
> +		ret = rrpc_lun_init(rrpc, rlun, lun);
> +		if (ret)
>  			goto err;
> -		}
> -
> -		for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
> -			struct rrpc_block *rblk = &rlun->blocks[j];
> -			struct nvm_block *blk = &lun->blocks[j];
> -
> -			rblk->parent = blk;
> -			rblk->rlun = rlun;
> -			INIT_LIST_HEAD(&rblk->prio);
> -			spin_lock_init(&rblk->lock);
> -		}
> -
> -		rlun->rrpc = rrpc;
> -		INIT_LIST_HEAD(&rlun->prio_list);
> -		INIT_LIST_HEAD(&rlun->open_list);
> -		INIT_LIST_HEAD(&rlun->closed_list);
> -
> -		INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
> -		spin_lock_init(&rlun->lock);
> -
>  		rrpc->total_blocks += dev->blks_per_lun;
>  		rrpc->nr_sects += dev->sec_per_lun;
>  
> @@ -1213,6 +1252,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
>  
>  	return 0;
>  err:
> +	rrpc_luns_free(rrpc);
>  	return ret;
>  }
>  
> @@ -1285,14 +1325,16 @@ static sector_t rrpc_capacity(void *private)
>  static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
>  {
>  	struct nvm_dev *dev = rrpc->dev;
> +	struct rrpc_lun *rlun = rblk->rlun;
>  	int offset;
>  	struct rrpc_addr *laddr;
> -	u64 paddr, pladdr;
> +	u64 paddr, pladdr, poffset;
>  
> +	poffset = lun_poffset(rlun->parent, dev);
>  	for (offset = 0; offset < dev->pgs_per_blk; offset++) {
>  		paddr = block_to_addr(rrpc, rblk) + offset;
>  
> -		pladdr = rrpc->rev_trans_map[paddr].addr;
> +		pladdr = rlun->rev_trans_map[paddr - poffset].addr;
>  		if (pladdr == ADDR_EMPTY)
>  			continue;
>  
> @@ -1357,7 +1399,7 @@ err:
>  static struct nvm_tgt_type tt_rrpc;
>  
>  static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
> -						int lun_begin, int lun_end)
> +		struct nvm_ioctl_create_conf *conf)
>  {
>  	struct request_queue *bqueue = dev->q;
>  	struct request_queue *tqueue = tdisk->queue;
> @@ -1383,7 +1425,16 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
>  	spin_lock_init(&rrpc->bio_lock);
>  	INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
>  
> -	rrpc->nr_luns = lun_end - lun_begin + 1;
> +	if (conf->type == NVM_CONFIG_TYPE_SIMPLE) {
> +		rrpc->nr_luns = conf->s.lun_end - conf->s.lun_begin + 1;
> +	} else if (conf->type == NVM_CONFIG_TYPE_LIST) {
> +		rrpc->nr_luns = conf->l.nr_luns;
> +	} else {
> +		kfree(rrpc);
> +		return ERR_PTR(-EINVAL);
> +	}
>  
>  	/* simple round-robin strategy */
>  	atomic_set(&rrpc->next_lun, -1);
> @@ -1395,15 +1446,12 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
>  	}
>  	rrpc->soffset = soffset;
>  
> -	ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
> +	ret = rrpc_luns_init(rrpc, conf);
>  	if (ret) {
>  		pr_err("nvm: rrpc: could not initialize luns\n");
>  		goto err;
>  	}
>  
> -	rrpc->poffset = dev->sec_per_lun * lun_begin;
> -	rrpc->lun_offset = lun_begin;
> -
>  	ret = rrpc_core_init(rrpc);
>  	if (ret) {
>  		pr_err("nvm: rrpc: could not initialize core\n");
> diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
> index 6148b14..abe9135 100644
> --- a/drivers/lightnvm/rrpc.h
> +++ b/drivers/lightnvm/rrpc.h
> @@ -87,6 +87,10 @@ struct rrpc_lun {
>  
>  	struct work_struct ws_gc;
>  
> +	/* store a reverse map for garbage collection */
> +	struct rrpc_rev_addr *rev_trans_map;
> +	spinlock_t rev_lock;
> +
>  	spinlock_t lock;
>  };
>  
> @@ -124,9 +128,6 @@ struct rrpc {
>  	 * addresses are used when writing to the disk block device.
>  	 */
>  	struct rrpc_addr *trans_map;
> -	/* also store a reverse map for garbage collection */
> -	struct rrpc_rev_addr *rev_trans_map;
> -	spinlock_t rev_lock;
>  
>  	struct rrpc_inflight inflights;
>  
> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
> index 2a17dc1..d2f2632 100644
> --- a/include/linux/lightnvm.h
> +++ b/include/linux/lightnvm.h
> @@ -271,6 +271,7 @@ struct nvm_lun {
>  	spinlock_t lock;
>  
>  	struct nvm_block *blocks;
> +	void *private;
>  };
>  
>  enum {
> @@ -425,7 +426,8 @@ static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
>  
>  typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
>  typedef sector_t (nvm_tgt_capacity_fn)(void *);
> -typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
> +typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *,
> +			struct nvm_ioctl_create_conf *);
>  typedef void (nvm_tgt_exit_fn)(void *);
>  
>  struct nvm_tgt_type {
> diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
> index 774a431..23ebc0c9 100644
> --- a/include/uapi/linux/lightnvm.h
> +++ b/include/uapi/linux/lightnvm.h
> @@ -35,6 +35,8 @@
>  #define NVM_TTYPE_MAX 63
>  #define NVM_MMTYPE_LEN 8
>  
> +#define NVM_LUNS_MAX 1024

Let's limit it to 768. That way we don't go above a single memory page,
and we have room for other variables when needed.
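Back-of-the-envelope, assuming DISK_NAME_LEN is 32: with NVM_LUNS_MAX at
1024, struct nvm_ioctl_free_luns is 32 + 4 + 1024 * 4 = 4132 bytes, just
over one 4 KiB page; at 768 it is 32 + 4 + 768 * 4 = 3108 bytes, leaving
roughly 1 KiB of headroom.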

> +
>  #define NVM_CTRL_FILE "/dev/lightnvm/control"
>  
>  struct nvm_ioctl_info_tgt {
> @@ -74,14 +76,21 @@ struct nvm_ioctl_create_simple {
>  	__u32 lun_end;
>  };
>  
> +struct nvm_ioctl_create_list {
> +	__u32 nr_luns;
> +	__u32 lunid[NVM_LUNS_MAX];
> +};
> +
>  enum {
>  	NVM_CONFIG_TYPE_SIMPLE = 0,
> +	NVM_CONFIG_TYPE_LIST,
>  };
>  
>  struct nvm_ioctl_create_conf {
>  	__u32 type;
>  	union {
>  		struct nvm_ioctl_create_simple s;
> +		struct nvm_ioctl_create_list l;
>  	};
>  };
>  
> @@ -101,6 +110,12 @@ struct nvm_ioctl_remove {
>  	__u32 flags;
>  };
>  
> +struct nvm_ioctl_free_luns {
> +	char dev[DISK_NAME_LEN];
> +	__u32 nr_free_luns;
> +	__u32 free_lunid[NVM_LUNS_MAX];
> +};
> +
>  struct nvm_ioctl_dev_init {
>  	char dev[DISK_NAME_LEN];		/* open-channel SSD device */
>  	char mmtype[NVM_MMTYPE_LEN];		/* register to media manager */
> @@ -131,6 +146,7 @@ enum {
>  	/* device level cmds */
>  	NVM_DEV_CREATE_CMD,
>  	NVM_DEV_REMOVE_CMD,
> +	NVM_DEV_FREE_LUNS_CMD,
>  
>  	/* Init a device to support LightNVM media managers */
>  	NVM_DEV_INIT_CMD,
> @@ -149,6 +165,8 @@ enum {
>  						struct nvm_ioctl_create)
>  #define NVM_DEV_REMOVE		_IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \
>  						struct nvm_ioctl_remove)
> +#define NVM_DEV_FREE_LUNS	_IOW(NVM_IOCTL,	NVM_DEV_FREE_LUNS_CMD, \
> +						struct nvm_ioctl_free_luns)
>  #define NVM_DEV_INIT		_IOW(NVM_IOCTL, NVM_DEV_INIT_CMD, \
>  						struct nvm_ioctl_dev_init)
>  #define NVM_DEV_FACTORY		_IOW(NVM_IOCTL, NVM_DEV_FACTORY_CMD, \
> 
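For context, creating a target on an explicit lun list with the proposed
interface would look roughly like this from user space (a sketch against
the uapi above; device and target names are made up, error handling
omitted):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/lightnvm.h>

	struct nvm_ioctl_create c = { 0 };	/* flags must be 0 */
	int fd;

	strcpy(c.dev, "nvme0n1");
	strcpy(c.tgtname, "mytgt");
	strcpy(c.tgttype, "rrpc");
	c.conf.type = NVM_CONFIG_TYPE_LIST;
	c.conf.l.nr_luns = 3;
	c.conf.l.lunid[0] = 0;			/* non-continuous luns */
	c.conf.l.lunid[1] = 5;
	c.conf.l.lunid[2] = 9;

	fd = open(NVM_CTRL_FILE, O_RDWR);
	ioctl(fd, NVM_DEV_CREATE, &c);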

The patch is starting to look good.

I think it would be great to move NVM_DEV_FREE_LUNS into its own patch
and rename it to:

NVM_DEV_LUNS_STATUS, and then return per lun information such as

 - Id
 - Lun is reserved/available
 - Local lunid on channel
 - Channel id
 - Nr_open_blocks, nr_closed_blocks, nr_free_blocks, nr_bad_blocks

or something similar?
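
A possible shape for that, purely illustrative (field names are
assumptions, not a committed interface):

	struct nvm_ioctl_lun_status {
		__u32 lunid;		/* global lun id */
		__u32 chnl_id;		/* channel the lun sits on */
		__u32 chnl_lunid;	/* local lun id within the channel */
		__u32 reserved;		/* in use by a target? */
		__u32 nr_free_blocks;
		__u32 nr_open_blocks;
		__u32 nr_closed_blocks;
		__u32 nr_bad_blocks;
	};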

Thanks

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH v3 3/3] lightnvm: add non-continuous lun target creation support
  2016-02-05 12:55   ` Matias Bjørling
@ 2016-02-05 14:19     ` Wenwei Tao
  0 siblings, 0 replies; 9+ messages in thread
From: Wenwei Tao @ 2016-02-05 14:19 UTC (permalink / raw)
  To: Matias Bjørling; +Cc: linux-kernel, linux-block

Okay, I agree it's good to export this information to user space. Will
include these changes in the next version.

2016-02-05 20:55 GMT+08:00 Matias Bjørling <mb@lightnvm.io>:
> On 02/05/2016 03:42 AM, Wenwei Tao wrote:
>> When create a target, we specify the begin lunid and
>> the end lunid, and get the corresponding continuous
>> luns from media manager, if one of the luns is not free,
>> we failed to create the target, even if the device's
>> total free luns are enough.
>>
>> So add non-continuous lun target creation support,
>> thus we can improve the backend device's space utilization.
>>
>> Signed-off-by: Wenwei Tao <ww.tao0320@gmail.com>
>> ---
>> Changes since v2
>> -rebase on for-next branch
>> -move luns bitmap to PATCH 2
>> -remove the logic to dynamically select another lun than
>> the one requested
>> -implement lunid list in the lnvm ioctl interface
>>
>> Changes since v1
>> -use NVM_FIXED instead NVM_C_FIXED in gennvm_get_lun
>> -add target creation flags check
>> -rebase to v4.5-rc1
>>
>>  drivers/lightnvm/core.c       | 101 ++++++++++++++------
>>  drivers/lightnvm/rrpc.c       | 208 ++++++++++++++++++++++++++----------------
>>  drivers/lightnvm/rrpc.h       |   7 +-
>>  include/linux/lightnvm.h      |   4 +-
>>  include/uapi/linux/lightnvm.h |  18 ++++
>>  5 files changed, 228 insertions(+), 110 deletions(-)
>>
>> diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
>> index 11b8e2d..cbfd575 100644
>> --- a/drivers/lightnvm/core.c
>> +++ b/drivers/lightnvm/core.c
>> @@ -622,7 +622,7 @@ static const struct block_device_operations nvm_fops = {
>>  static int nvm_create_target(struct nvm_dev *dev,
>>                                               struct nvm_ioctl_create *create)
>>  {
>> -     struct nvm_ioctl_create_simple *s = &create->conf.s;
>> +     struct nvm_ioctl_create_conf *conf = &create->conf;
>>       struct request_queue *tqueue;
>>       struct gendisk *tdisk;
>>       struct nvm_tgt_type *tt;
>> @@ -671,7 +671,7 @@ static int nvm_create_target(struct nvm_dev *dev,
>>       tdisk->fops = &nvm_fops;
>>       tdisk->queue = tqueue;
>>
>> -     targetdata = tt->init(dev, tdisk, s->lun_begin, s->lun_end);
>> +     targetdata = tt->init(dev, tdisk, conf);
>>       if (IS_ERR(targetdata))
>>               goto err_init;
>>
>> @@ -723,7 +723,6 @@ static void nvm_remove_target(struct nvm_target *t)
>>  static int __nvm_configure_create(struct nvm_ioctl_create *create)
>>  {
>>       struct nvm_dev *dev;
>> -     struct nvm_ioctl_create_simple *s;
>>
>>       down_write(&nvm_lock);
>>       dev = nvm_find_nvm_dev(create->dev);
>> @@ -733,17 +732,11 @@ static int __nvm_configure_create(struct nvm_ioctl_create *create)
>>               return -EINVAL;
>>       }
>>
>> -     if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE) {
>> +     if (create->conf.type != NVM_CONFIG_TYPE_SIMPLE &&
>> +             create->conf.type != NVM_CONFIG_TYPE_LIST) {
>>               pr_err("nvm: config type not valid\n");
>>               return -EINVAL;
>>       }
>> -     s = &create->conf.s;
>> -
>> -     if (s->lun_begin > s->lun_end || s->lun_end > dev->nr_luns) {
>> -             pr_err("nvm: lun out of bound (%u:%u > %u)\n",
>> -                     s->lun_begin, s->lun_end, dev->nr_luns);
>> -             return -EINVAL;
>> -     }
>>
>>       return nvm_create_target(dev, create);
>>  }
>> @@ -821,24 +814,29 @@ static int nvm_configure_remove(const char *val)
>>
>>  static int nvm_configure_create(const char *val)
>>  {
>> -     struct nvm_ioctl_create create;
>> +     struct nvm_ioctl_create *create;
>>       char opcode;
>>       int lun_begin, lun_end, ret;
>>
>> -     ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create.dev,
>> -                                             create.tgtname, create.tgttype,
>> +     create = kzalloc(sizeof(struct nvm_ioctl_create), GFP_KERNEL);
>> +     if (!create)
>> +             return -ENOMEM;
>> +
>> +     ret = sscanf(val, "%c %256s %256s %48s %u:%u", &opcode, create->dev,
>> +                                     create->tgtname, create->tgttype,
>>                                               &lun_begin, &lun_end);
>>       if (ret != 6) {
>>               pr_err("nvm: invalid command. Use \"opcode device name tgttype lun_begin:lun_end\".\n");
>> +             kfree(create);
>>               return -EINVAL;
>>       }
>>
>> -     create.flags = 0;
>> -     create.conf.type = NVM_CONFIG_TYPE_SIMPLE;
>> -     create.conf.s.lun_begin = lun_begin;
>> -     create.conf.s.lun_end = lun_end;
>> +     create->flags = 0;
>> +     create->conf.type = NVM_CONFIG_TYPE_SIMPLE;
>> +     create->conf.s.lun_begin = lun_begin;
>> +     create->conf.s.lun_end = lun_end;
>>
>> -     return __nvm_configure_create(&create);
>> +     return __nvm_configure_create(create);
>>  }
>>
>>
>> @@ -991,24 +989,30 @@ static long nvm_ioctl_get_devices(struct file *file, void __user *arg)
>>
>>  static long nvm_ioctl_dev_create(struct file *file, void __user *arg)
>>  {
>> -     struct nvm_ioctl_create create;
>> +     struct nvm_ioctl_create *create;
>>
>>       if (!capable(CAP_SYS_ADMIN))
>>               return -EPERM;
>> +     create = kzalloc(sizeof(struct nvm_ioctl_create), GFP_KERNEL);
>> +     if (!create)
>> +             return -ENOMEM;
>>
>> -     if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create)))
>> +     if (copy_from_user(&create, arg, sizeof(struct nvm_ioctl_create))) {
>> +             kfree(create);
>>               return -EFAULT;
>> +     }
>>
>> -     create.dev[DISK_NAME_LEN - 1] = '\0';
>> -     create.tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
>> -     create.tgtname[DISK_NAME_LEN - 1] = '\0';
>> +     create->dev[DISK_NAME_LEN - 1] = '\0';
>> +     create->tgttype[NVM_TTYPE_NAME_MAX - 1] = '\0';
>> +     create->tgtname[DISK_NAME_LEN - 1] = '\0';
>>
>> -     if (create.flags != 0) {
>> +     if (create->flags != 0) {
>>               pr_err("nvm: no flags supported\n");
>> +             kfree(create);
>>               return -EINVAL;
>>       }
>>
>> -     return __nvm_configure_create(&create);
>> +     return __nvm_configure_create(create);
>>  }
>>
>>  static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
>> @@ -1031,6 +1035,49 @@ static long nvm_ioctl_dev_remove(struct file *file, void __user *arg)
>>       return __nvm_configure_remove(&remove);
>>  }
>>
>> +static long nvm_ioctl_dev_free_luns(struct file *file, void __user *arg)
>> +{
>> +     struct nvm_ioctl_free_luns *free_luns;
>> +     struct nvm_dev *dev;
>> +     int lunid = 0;
>> +
>> +     if (!capable(CAP_SYS_ADMIN))
>> +             return -EPERM;
>> +
>> +     free_luns = kzalloc(sizeof(struct nvm_ioctl_free_luns), GFP_KERNEL);
>> +     if (!free_luns)
>> +             return -ENOMEM;
>> +
>> +     if (copy_from_user(&free_luns, arg,
>> +             sizeof(struct nvm_ioctl_free_luns))) {
>> +             kfree(free_luns);
>> +             return -EFAULT;
>> +     }
>> +     free_luns->dev[DISK_NAME_LEN - 1] = '\0';
>> +     down_write(&nvm_lock);
>> +     dev = nvm_find_nvm_dev(free_luns->dev);
>> +     up_write(&nvm_lock);
>> +     if (!dev) {
>> +             pr_err("nvm: device not found\n");
>> +             kfree(free_luns);
>> +             return -EINVAL;
>> +     }
>> +
>> +     free_luns->nr_free_luns = 0;
>> +     while ((lunid = find_next_zero_bit(dev->lun_map, dev->nr_luns,
>> +                             lunid)) < dev->nr_luns) {
>> +
>> +             if (free_luns->nr_free_luns >= NVM_LUNS_MAX) {
>> +                     pr_err("nvm: max %u free luns can be reported.\n",
>> +                                                     NVM_LUNS_MAX);
>> +                     break;
>> +             }
>> +             free_luns->free_lunid[free_luns->nr_free_luns++] = lunid;
>> +             lunid++;
>> +     }
>> +     return 0;
>> +}
>> +
>>  static void nvm_setup_nvm_sb_info(struct nvm_sb_info *info)
>>  {
>>       info->seqnr = 1;
>> @@ -1135,6 +1182,8 @@ static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
>>               return nvm_ioctl_dev_create(file, argp);
>>       case NVM_DEV_REMOVE:
>>               return nvm_ioctl_dev_remove(file, argp);
>> +     case NVM_DEV_FREE_LUNS:
>> +             return nvm_ioctl_dev_free_luns(file, argp);
>>       case NVM_DEV_INIT:
>>               return nvm_ioctl_dev_init(file, argp);
>>       case NVM_DEV_FACTORY:
>> diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
>> index 2bd5789..88b395d 100644
>> --- a/drivers/lightnvm/rrpc.c
>> +++ b/drivers/lightnvm/rrpc.c
>> @@ -23,28 +23,35 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
>>                               struct nvm_rq *rqd, unsigned long flags);
>>
>>  #define rrpc_for_each_lun(rrpc, rlun, i) \
>> -             for ((i) = 0, rlun = &(rrpc)->luns[0]; \
>> -                     (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
>> +     for ((i) = 0, rlun = &(rrpc)->luns[0]; \
>> +             (i) < (rrpc)->nr_luns; (i)++, rlun = &(rrpc)->luns[(i)])
>> +
>> +static inline u64 lun_poffset(struct nvm_lun *lun, struct nvm_dev *dev)
>> +{
>> +     return lun->id * dev->sec_per_lun;
>> +}
>>
>>  static void rrpc_page_invalidate(struct rrpc *rrpc, struct rrpc_addr *a)
>>  {
>>       struct rrpc_block *rblk = a->rblk;
>> -     unsigned int pg_offset;
>> +     struct rrpc_lun *rlun = rblk->rlun;
>> +     u64 pg_offset;
>>
>> -     lockdep_assert_held(&rrpc->rev_lock);
>> +     lockdep_assert_held(&rlun->rev_lock);
>>
>>       if (a->addr == ADDR_EMPTY || !rblk)
>>               return;
>>
>>       spin_lock(&rblk->lock);
>>
>> -     div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, &pg_offset);
>> +     div_u64_rem(a->addr, rrpc->dev->pgs_per_blk, (u32 *)&pg_offset);
>>       WARN_ON(test_and_set_bit(pg_offset, rblk->invalid_pages));
>>       rblk->nr_invalid_pages++;
>>
>>       spin_unlock(&rblk->lock);
>>
>> -     rrpc->rev_trans_map[a->addr - rrpc->poffset].addr = ADDR_EMPTY;
>> +     pg_offset = lun_poffset(rlun->parent, rrpc->dev);
>> +     rlun->rev_trans_map[a->addr - pg_offset].addr = ADDR_EMPTY;
>>  }
>>
>>  static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
>> @@ -52,14 +59,15 @@ static void rrpc_invalidate_range(struct rrpc *rrpc, sector_t slba,
>>  {
>>       sector_t i;
>>
>> -     spin_lock(&rrpc->rev_lock);
>>       for (i = slba; i < slba + len; i++) {
>>               struct rrpc_addr *gp = &rrpc->trans_map[i];
>> +             struct rrpc_lun *rlun = gp->rblk->rlun;
>>
>> +             spin_lock(&rlun->rev_lock);
>>               rrpc_page_invalidate(rrpc, gp);
>> +             spin_unlock(&rlun->rev_lock);
>>               gp->rblk = NULL;
>>       }
>> -     spin_unlock(&rrpc->rev_lock);
>>  }
>>
>>  static struct nvm_rq *rrpc_inflight_laddr_acquire(struct rrpc *rrpc,
>> @@ -281,13 +289,14 @@ static void rrpc_end_sync_bio(struct bio *bio)
>>  static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
>>  {
>>       struct request_queue *q = rrpc->dev->q;
>> +     struct rrpc_lun *rlun = rblk->rlun;
>>       struct rrpc_rev_addr *rev;
>>       struct nvm_rq *rqd;
>>       struct bio *bio;
>>       struct page *page;
>>       int slot;
>>       int nr_pgs_per_blk = rrpc->dev->pgs_per_blk;
>> -     u64 phys_addr;
>> +     u64 phys_addr, poffset;
>>       DECLARE_COMPLETION_ONSTACK(wait);
>>
>>       if (bitmap_full(rblk->invalid_pages, nr_pgs_per_blk))
>> @@ -303,6 +312,7 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
>>       if (!page)
>>               return -ENOMEM;
>>
>> +     poffset = lun_poffset(rlun->parent, rrpc->dev);
>>       while ((slot = find_first_zero_bit(rblk->invalid_pages,
>>                                           nr_pgs_per_blk)) < nr_pgs_per_blk) {
>>
>> @@ -310,23 +320,23 @@ static int rrpc_move_valid_pages(struct rrpc *rrpc, struct rrpc_block *rblk)
>>               phys_addr = (rblk->parent->id * nr_pgs_per_blk) + slot;
>>
>>  try:
>> -             spin_lock(&rrpc->rev_lock);
>> +             spin_lock(&rlun->rev_lock);
>>               /* Get logical address from physical to logical table */
>> -             rev = &rrpc->rev_trans_map[phys_addr - rrpc->poffset];
>> +             rev = &rlun->rev_trans_map[phys_addr - poffset];
>>               /* already updated by previous regular write */
>>               if (rev->addr == ADDR_EMPTY) {
>> -                     spin_unlock(&rrpc->rev_lock);
>> +                     spin_unlock(&rlun->rev_lock);
>>                       continue;
>>               }
>>
>>               rqd = rrpc_inflight_laddr_acquire(rrpc, rev->addr, 1);
>>               if (IS_ERR_OR_NULL(rqd)) {
>> -                     spin_unlock(&rrpc->rev_lock);
>> +                     spin_unlock(&rlun->rev_lock);
>>                       schedule();
>>                       goto try;
>>               }
>>
>> -             spin_unlock(&rrpc->rev_lock);
>> +             spin_unlock(&rlun->rev_lock);
>>
>>               /* Perform read to do GC */
>>               bio->bi_iter.bi_sector = rrpc_get_sector(rev->addr);
>> @@ -395,7 +405,7 @@ static void rrpc_block_gc(struct work_struct *work)
>>       struct rrpc_block *rblk = gcb->rblk;
>>       struct nvm_dev *dev = rrpc->dev;
>>       struct nvm_lun *lun = rblk->parent->lun;
>> -     struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
>> +     struct rrpc_lun *rlun = lun->private;
>>
>>       mempool_free(gcb, rrpc->gcb_pool);
>>       pr_debug("nvm: block '%lu' being reclaimed\n", rblk->parent->id);
>> @@ -498,7 +508,7 @@ static void rrpc_gc_queue(struct work_struct *work)
>>       struct rrpc_block *rblk = gcb->rblk;
>>       struct nvm_lun *lun = rblk->parent->lun;
>>       struct nvm_block *blk = rblk->parent;
>> -     struct rrpc_lun *rlun = &rrpc->luns[lun->id - rrpc->lun_offset];
>> +     struct rrpc_lun *rlun = lun->private;
>>
>>       spin_lock(&rlun->lock);
>>       list_add_tail(&rblk->prio, &rlun->prio_list);
>> @@ -549,22 +559,24 @@ static struct rrpc_lun *rrpc_get_lun_rr(struct rrpc *rrpc, int is_gc)
>>  static struct rrpc_addr *rrpc_update_map(struct rrpc *rrpc, sector_t laddr,
>>                                       struct rrpc_block *rblk, u64 paddr)
>>  {
>> +     struct rrpc_lun *rlun = rblk->rlun;
>>       struct rrpc_addr *gp;
>>       struct rrpc_rev_addr *rev;
>> +     u64 poffset = lun_poffset(rlun->parent, rrpc->dev);
>>
>>       BUG_ON(laddr >= rrpc->nr_sects);
>>
>>       gp = &rrpc->trans_map[laddr];
>> -     spin_lock(&rrpc->rev_lock);
>> +     spin_lock(&rlun->rev_lock);
>>       if (gp->rblk)
>>               rrpc_page_invalidate(rrpc, gp);Having a new line would increase readability
>>
>>       gp->addr = paddr;
>>       gp->rblk = rblk;
>>
>> -     rev = &rrpc->rev_trans_map[gp->addr - rrpc->poffset];
>> +     rev = &rlun->rev_trans_map[gp->addr - poffset];
>>       rev->addr = laddr;
>> -     spin_unlock(&rrpc->rev_lock);
>> +     spin_unlock(&rlun->rev_lock);
>>
>>       return gp;
>>  }
>> @@ -953,8 +965,6 @@ static void rrpc_requeue(struct work_struct *work)
>>
>>  static void rrpc_gc_free(struct rrpc *rrpc)
>>  {
>> -     struct rrpc_lun *rlun;
>> -     int i;
>>
>>       if (rrpc->krqd_wq)
>>               destroy_workqueue(rrpc->krqd_wq);
>> @@ -962,16 +972,6 @@ static void rrpc_gc_free(struct rrpc *rrpc)
>>       if (rrpc->kgc_wq)
>>               destroy_workqueue(rrpc->kgc_wq);
>>
>> -     if (!rrpc->luns)
>> -             return;
>> -
>> -     for (i = 0; i < rrpc->nr_luns; i++) {
>> -             rlun = &rrpc->luns[i];
>> -
>> -             if (!rlun->blocks)
>> -                     break;
>> -             vfree(rlun->blocks);
>> -     }
>>  }
>>
>>  static int rrpc_gc_init(struct rrpc *rrpc)
>> @@ -992,7 +992,6 @@ static int rrpc_gc_init(struct rrpc *rrpc)
>>
>>  static void rrpc_map_free(struct rrpc *rrpc)
>>  {
>> -     vfree(rrpc->rev_trans_map);
>>       vfree(rrpc->trans_map);
>>  }
>>
>> @@ -1000,8 +999,8 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
>>  {
>>       struct rrpc *rrpc = (struct rrpc *)private;
>>       struct nvm_dev *dev = rrpc->dev;
>> -     struct rrpc_addr *addr = rrpc->trans_map + slba;
>> -     struct rrpc_rev_addr *raddr = rrpc->rev_trans_map;
>> +     struct rrpc_addr *addr;
>> +     struct rrpc_rev_addr *raddr;
>>       u64 elba = slba + nlb;
>>       u64 i;
>>
>> @@ -1010,8 +1009,15 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
>>               return -EINVAL;
>>       }
>>
>> +     slba -= rrpc->soffset >> (ilog2(dev->sec_size) - 9);
>> +     addr = rrpc->trans_map + slba;
>>       for (i = 0; i < nlb; i++) {
>> +             struct rrpc_lun *rlun;
>> +             struct nvm_lun *lun;
>>               u64 pba = le64_to_cpu(entries[i]);
>> +             u64 poffset;
>> +             int lunid;
>> +
>>               /* LNVM treats address-spaces as silos, LBA and PBA are
>>                * equally large and zero-indexed.
>>                */
>> @@ -1026,9 +1032,16 @@ static int rrpc_l2p_update(u64 slba, u32 nlb, __le64 *entries, void *private)
>>                */
>>               if (!pba)
>>                       continue;
>
> A new line will increase readability-
>
>> +             lunid = div_u64(pba, dev->sec_per_lun);
>> +             lun = dev->mt->get_lun(dev, lunid);
>> +             if (unlikely(!lun))
>> +                     return -EINVAL;
>
> Same
>
>> +             rlun = lun->private;
>> +             raddr = rlun->rev_trans_map;
>> +             poffset = lun_poffset(lun, dev);
>>
>>               addr[i].addr = pba;
>> -             raddr[pba].addr = slba + i;
>> +             raddr[pba - poffset].addr = slba + i;
>>       }
>>
>>       return 0;
>> @@ -1047,17 +1060,10 @@ static int rrpc_map_init(struct rrpc *rrpc)
>>       if (!rrpc->trans_map)
>>               return -ENOMEM;
>>
>> -     rrpc->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr)
>> -                                                     * rrpc->nr_sects);
>> -     if (!rrpc->rev_trans_map)
>> -             return -ENOMEM;
>> -
>>       for (i = 0; i < rrpc->nr_sects; i++) {
>>               struct rrpc_addr *p = &rrpc->trans_map[i];
>> -             struct rrpc_rev_addr *r = &rrpc->rev_trans_map[i];
>>
>>               p->addr = ADDR_EMPTY;
>> -             r->addr = ADDR_EMPTY;
>>       }
>>
>>       if (!dev->ops->get_l2p_tbl)
>> @@ -1129,8 +1135,8 @@ static void rrpc_core_free(struct rrpc *rrpc)
>>  static void rrpc_luns_free(struct rrpc *rrpc)
>>  {
>>       struct nvm_dev *dev = rrpc->dev;
>> -     struct nvm_lun *lun;
>>       struct rrpc_lun *rlun;
>> +     struct nvm_lun *lun;
>>       int i;
>>
>>       if (!rrpc->luns)
>> @@ -1142,24 +1148,68 @@ static void rrpc_luns_free(struct rrpc *rrpc)
>>               if (!lun)
>>                       break;
>>               dev->mt->release_lun(dev, lun->id);
>> +             vfree(rlun->rev_trans_map);
>>               vfree(rlun->blocks);
>>       }
>>       kfree(rrpc->luns);
>> +     rrpc->luns = NULL;
>> +
>
> No new line needed-
>
>> +}
>> +
>> +static int rrpc_lun_init(struct rrpc *rrpc, struct rrpc_lun *rlun,
>> +                     struct nvm_lun *lun)
>> +{
>> +     struct nvm_dev *dev = rrpc->dev;
>> +     int i;
>> +
>> +     rlun->rev_trans_map = vmalloc(sizeof(struct rrpc_rev_addr) *
>> +                                     dev->sec_per_lun);
>> +     if (!rlun->rev_trans_map)
>> +             return -ENOMEM;
>> +
>> +     for (i = 0; i < dev->sec_per_lun; i++) {
>> +             struct rrpc_rev_addr *r = &rlun->rev_trans_map[i];
>> +
>> +             r->addr = ADDR_EMPTY;
>> +     }
>> +     rlun->blocks = vzalloc(sizeof(struct rrpc_block) * dev->blks_per_lun);
>> +     if (!rlun->blocks) {
>> +             vfree(rlun->rev_trans_map);
>> +             return -ENOMEM;
>> +     }
>> +
>> +     for (i = 0; i < dev->blks_per_lun; i++) {
>> +             struct rrpc_block *rblk = &rlun->blocks[i];
>> +             struct nvm_block *blk = &lun->blocks[i];
>> +
>> +             rblk->parent = blk;
>> +             rblk->rlun = rlun;
>> +             INIT_LIST_HEAD(&rblk->prio);
>> +             spin_lock_init(&rblk->lock);
>> +     }
>> +
>> +     rlun->rrpc = rrpc;
>> +     lun->private = rlun;
>> +     INIT_LIST_HEAD(&rlun->prio_list);
>> +     INIT_LIST_HEAD(&rlun->open_list);
>> +     INIT_LIST_HEAD(&rlun->closed_list);
>> +     INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
>> +     spin_lock_init(&rlun->lock);
>> +     spin_lock_init(&rlun->rev_lock);
>
> A new line would be great here.
>
>> +     return 0;
>>  }
>>
>> -static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
>> +static int rrpc_luns_init(struct rrpc *rrpc, struct nvm_ioctl_create_conf *conf)
>>  {
>>       struct nvm_dev *dev = rrpc->dev;
>>       struct rrpc_lun *rlun;
>> -     int i, j, ret = -EINVAL;
>> +     int i, ret = -EINVAL;
>>
>>       if (dev->pgs_per_blk > MAX_INVALID_PAGES_STORAGE * BITS_PER_LONG) {
>>               pr_err("rrpc: number of pages per block too high.");
>>               return -EINVAL;
>>       }
>>
>> -     spin_lock_init(&rrpc->rev_lock);
>> -
>>       rrpc->luns = kcalloc(rrpc->nr_luns, sizeof(struct rrpc_lun),
>>                                                               GFP_KERNEL);
>>       if (!rrpc->luns)
>> @@ -1167,9 +1217,20 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
>>
>>       /* 1:1 mapping */
>>       for (i = 0; i < rrpc->nr_luns; i++) {
>> -             int lunid = lun_begin + i;
>>               struct nvm_lun *lun;
>> +             int lunid;
>>
>> +             if (conf->type == NVM_CONFIG_TYPE_SIMPLE)
>> +                     lunid = conf->s.lun_begin + i;
>> +             else if (conf->type == NVM_CONFIG_TYPE_LIST)
>> +                     lunid = conf->l.lunid[i];
>> +             else
>> +                     goto err;
>
> It makes it more readable to insert a blank line here.
>
>> +             if (lunid >= dev->nr_luns) {
>> +                     pr_err("rrpc: lun out of bound (%u >= %u)\n",
>> +                                             lunid, dev->nr_luns);
>> +                     goto err;
>> +             }
>
> Same
>
>>               if (dev->mt->reserve_lun(dev, lunid)) {
>>                       pr_err("rrpc: lun %u is already allocated\n", lunid);
>>                       goto err;
>> @@ -1181,31 +1242,9 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
>>
>>               rlun = &rrpc->luns[i];
>>               rlun->parent = lun;
>> -             rlun->blocks = vzalloc(sizeof(struct rrpc_block) *
>> -                                             rrpc->dev->blks_per_lun);
>> -             if (!rlun->blocks) {
>> -                     ret = -ENOMEM;
>> +             ret = rrpc_lun_init(rrpc, rlun, lun);
>> +             if (ret)
>>                       goto err;
>> -             }
>> -
>> -             for (j = 0; j < rrpc->dev->blks_per_lun; j++) {
>> -                     struct rrpc_block *rblk = &rlun->blocks[j];
>> -                     struct nvm_block *blk = &lun->blocks[j];
>> -
>> -                     rblk->parent = blk;
>> -                     rblk->rlun = rlun;
>> -                     INIT_LIST_HEAD(&rblk->prio);
>> -                     spin_lock_init(&rblk->lock);
>> -             }
>> -
>> -             rlun->rrpc = rrpc;
>> -             INIT_LIST_HEAD(&rlun->prio_list);
>> -             INIT_LIST_HEAD(&rlun->open_list);
>> -             INIT_LIST_HEAD(&rlun->closed_list);
>> -
>> -             INIT_WORK(&rlun->ws_gc, rrpc_lun_gc);
>> -             spin_lock_init(&rlun->lock);
>> -
>>               rrpc->total_blocks += dev->blks_per_lun;
>>               rrpc->nr_sects += dev->sec_per_lun;
>>
>> @@ -1213,6 +1252,7 @@ static int rrpc_luns_init(struct rrpc *rrpc, int lun_begin, int lun_end)
>>
>>       return 0;
>>  err:
>> +     rrpc_luns_free(rrpc);
>>       return ret;
>>  }
>>
>> @@ -1285,14 +1325,16 @@ static sector_t rrpc_capacity(void *private)
>>  static void rrpc_block_map_update(struct rrpc *rrpc, struct rrpc_block *rblk)
>>  {
>>       struct nvm_dev *dev = rrpc->dev;
>> +     struct rrpc_lun *rlun = rblk->rlun;
>>       int offset;
>>       struct rrpc_addr *laddr;
>> -     u64 paddr, pladdr;
>> +     u64 paddr, pladdr, poffset;
>>
>> +     poffset = lun_poffset(rlun->parent, dev);
>>       for (offset = 0; offset < dev->pgs_per_blk; offset++) {
>>               paddr = block_to_addr(rrpc, rblk) + offset;
>>
>> -             pladdr = rrpc->rev_trans_map[paddr].addr;
>> +             pladdr = rlun->rev_trans_map[paddr - poffset].addr;
>>               if (pladdr == ADDR_EMPTY)
>>                       continue;
>>
>> @@ -1357,7 +1399,7 @@ err:
>>  static struct nvm_tgt_type tt_rrpc;
>>
>>  static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
>> -                                             int lun_begin, int lun_end)
>> +             struct nvm_ioctl_create_conf *conf)
>>  {
>>       struct request_queue *bqueue = dev->q;
>>       struct request_queue *tqueue = tdisk->queue;
>> @@ -1383,7 +1425,16 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
>>       spin_lock_init(&rrpc->bio_lock);
>>       INIT_WORK(&rrpc->ws_requeue, rrpc_requeue);
>>
>> -     rrpc->nr_luns = lun_end - lun_begin + 1;
>> +     if (conf->type == NVM_CONFIG_TYPE_SIMPLE) {
>> +             rrpc->nr_luns = conf->s.lun_end - conf->s.lun_begin + 1;
>> +     } else if (conf->type == NVM_CONFIG_TYPE_LIST) {
>> +             rrpc->nr_luns = conf->l.nr_luns;
>> +     } else {
>> +             kfree(rrpc);
>> +             return ERR_PTR(-EINVAL);
>> +     }
>>
>>       /* simple round-robin strategy */
>>       atomic_set(&rrpc->next_lun, -1);
>> @@ -1395,15 +1446,12 @@ static void *rrpc_init(struct nvm_dev *dev, struct gendisk *tdisk,
>>       }
>>       rrpc->soffset = soffset;
>>
>> -     ret = rrpc_luns_init(rrpc, lun_begin, lun_end);
>> +     ret = rrpc_luns_init(rrpc, conf);
>>       if (ret) {
>>               pr_err("nvm: rrpc: could not initialize luns\n");
>>               goto err;
>>       }
>>
>> -     rrpc->poffset = dev->sec_per_lun * lun_begin;
>> -     rrpc->lun_offset = lun_begin;
>> -
>>       ret = rrpc_core_init(rrpc);
>>       if (ret) {
>>               pr_err("nvm: rrpc: could not initialize core\n");
>> diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
>> index 6148b14..abe9135 100644
>> --- a/drivers/lightnvm/rrpc.h
>> +++ b/drivers/lightnvm/rrpc.h
>> @@ -87,6 +87,10 @@ struct rrpc_lun {
>>
>>       struct work_struct ws_gc;
>>
>> +     /* store a reverse map for garbage collection */
>> +     struct rrpc_rev_addr *rev_trans_map;
>> +     spinlock_t rev_lock;
>> +
>>       spinlock_t lock;
>>  };
>>
>> @@ -124,9 +128,6 @@ struct rrpc {
>>        * addresses are used when writing to the disk block device.
>>        */
>>       struct rrpc_addr *trans_map;
>> -     /* also store a reverse map for garbage collection */
>> -     struct rrpc_rev_addr *rev_trans_map;
>> -     spinlock_t rev_lock;
>>
>>       struct rrpc_inflight inflights;
>>
>> diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
>> index 2a17dc1..d2f2632 100644
>> --- a/include/linux/lightnvm.h
>> +++ b/include/linux/lightnvm.h
>> @@ -271,6 +271,7 @@ struct nvm_lun {
>>       spinlock_t lock;
>>
>>       struct nvm_block *blocks;
>> +     void *private;
>>  };
>>
>>  enum {
>> @@ -425,7 +426,8 @@ static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
>>
>>  typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
>>  typedef sector_t (nvm_tgt_capacity_fn)(void *);
>> -typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
>> +typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *,
>> +                     struct nvm_ioctl_create_conf *);
>>  typedef void (nvm_tgt_exit_fn)(void *);
>>
>>  struct nvm_tgt_type {
>> diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
>> index 774a431..23ebc0c9 100644
>> --- a/include/uapi/linux/lightnvm.h
>> +++ b/include/uapi/linux/lightnvm.h
>> @@ -35,6 +35,8 @@
>>  #define NVM_TTYPE_MAX 63
>>  #define NVM_MMTYPE_LEN 8
>>
>> +#define NVM_LUNS_MAX 1024
>
> Let's limit it to 768. That way we don't go above a single memory page,
> and we have room for other variables when needed.
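>
> Quick arithmetic, assuming 4 KiB pages (my numbers, not taken from the
> patch): with NVM_LUNS_MAX at 1024, lunid[] alone is 4096 bytes, so
> struct nvm_ioctl_create_list comes to 4100 bytes and already spills
> past one page; at 768 it is 3076 bytes with ~1 KiB to spare. A
> compile-time check along these lines, dropped into some init function,
> would catch regressions:
>
>         /* sketch: 4 (nr_luns) + NVM_LUNS_MAX * 4 (lunid[]) bytes */
>         BUILD_BUG_ON(sizeof(struct nvm_ioctl_create_list) > PAGE_SIZE);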
>
>> +
>>  #define NVM_CTRL_FILE "/dev/lightnvm/control"
>>
>>  struct nvm_ioctl_info_tgt {
>> @@ -74,14 +76,21 @@ struct nvm_ioctl_create_simple {
>>       __u32 lun_end;
>>  };
>>
>> +struct nvm_ioctl_create_list {
>> +     __u32 nr_luns;
>> +     __u32 lunid[NVM_LUNS_MAX];
>> +};
>> +
>>  enum {
>>       NVM_CONFIG_TYPE_SIMPLE = 0,
>> +     NVM_CONFIG_TYPE_LIST,
>>  };
>>
>>  struct nvm_ioctl_create_conf {
>>       __u32 type;
>>       union {
>>               struct nvm_ioctl_create_simple s;
>> +             struct nvm_ioctl_create_list l;
>>       };
>>  };
>>
>> @@ -101,6 +110,12 @@ struct nvm_ioctl_remove {
>>       __u32 flags;
>>  };
>>
>> +struct nvm_ioctl_free_luns {
>> +     char dev[DISK_NAME_LEN];
>> +     __u32 nr_free_luns;
>> +     __u32 free_lunid[NVM_LUNS_MAX];
>> +};
>> +
>>  struct nvm_ioctl_dev_init {
>>       char dev[DISK_NAME_LEN];                /* open-channel SSD device */
>>       char mmtype[NVM_MMTYPE_LEN];            /* register to media manager */
>> @@ -131,6 +146,7 @@ enum {
>>       /* device level cmds */
>>       NVM_DEV_CREATE_CMD,
>>       NVM_DEV_REMOVE_CMD,
>> +     NVM_DEV_FREE_LUNS_CMD,
>>
>>       /* Init a device to support LightNVM media managers */
>>       NVM_DEV_INIT_CMD,
>> @@ -149,6 +165,8 @@ enum {
>>                                               struct nvm_ioctl_create)
>>  #define NVM_DEV_REMOVE               _IOW(NVM_IOCTL, NVM_DEV_REMOVE_CMD, \
>>                                               struct nvm_ioctl_remove)
>> +#define NVM_DEV_FREE_LUNS    _IOW(NVM_IOCTL, NVM_DEV_FREE_LUNS_CMD, \
>> +                                             struct nvm_ioctl_free_luns)
>>  #define NVM_DEV_INIT         _IOW(NVM_IOCTL, NVM_DEV_INIT_CMD, \
>>                                               struct nvm_ioctl_dev_init)
>>  #define NVM_DEV_FACTORY              _IOW(NVM_IOCTL, NVM_DEV_FACTORY_CMD, \
>>
>
> The patch is starting to look good.
>
> I think it would be great to move NVM_DEV_FREE_LUNS into its own patch
> and rename it to:
>
> NVM_DEV_LUNS_STATUS, and then return per-lun information such as:
>
>  - Id
>  - Lun is reserved/available
>  - Local lunid on channel
>  - Channel id
>  - Nr_open_blocks, nr_closed_blocks, nr_free_blocks, nr_bad_blocks
>
> or something similar?
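>
> Loosely, the per-lun record might look like this (field names are my
> own sketch, nothing settled):
>
>         struct nvm_ioctl_lun_status {
>                 __u32 lunid;            /* global lun id */
>                 __u32 chnl_id;          /* channel the lun sits on */
>                 __u32 chnl_lunid;       /* local lun id on that channel */
>                 __u32 reserved;         /* non-zero if claimed by a target */
>                 __u32 nr_open_blocks;
>                 __u32 nr_closed_blocks;
>                 __u32 nr_free_blocks;
>                 __u32 nr_bad_blocks;
>         };
>
> An array of these behind the device name and a count would mirror the
> existing nvm_ioctl_* payload conventions.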
>
> Thanks


Thread overview: 9+ messages
2016-02-04 11:34 [PATCH v3 1/3] lightnvm: specify target's logical address area Wenwei Tao
2016-02-04 11:34 ` [PATCH 2/3] lightnvm: add a bitmap of luns Wenwei Tao
2016-02-05 11:59   ` Matias Bjørling
2016-02-05 12:23     ` Wenwei Tao
2016-02-05 12:24       ` Matias Bjørling
2016-02-05  2:42 ` [PATCH v3 3/3] lightnvm: add non-continuous lun target creation support Wenwei Tao
2016-02-05 12:55   ` Matias Bjørling
2016-02-05 14:19     ` Wenwei Tao
2016-02-05 11:58 ` [PATCH v3 1/3] lightnvm: specify target's logical address area Matias Bjørling
