@@ -197,10 +197,17 @@ static struct mmc_queue_req *mmc_queue_alloc_mqrqs(struct mmc_queue *mq,
struct mmc_queue_req *mqrq;
int i;
+ if (mq->card->mqrq) {
+ mq->card->mqrq_ref_cnt += 1;
+ return mq->card->mqrq;
+ }
+
mqrq = kcalloc(qdepth, sizeof(*mqrq), GFP_KERNEL);
if (mqrq) {
for (i = 0; i < mq->qdepth; i++)
mqrq[i].task_id = i;
+ mq->card->mqrq = mqrq;
+ mq->card->mqrq_ref_cnt = 1;
}
return mqrq;
@@ -211,6 +218,9 @@ static bool mmc_queue_alloc_bounce_bufs(struct mmc_queue *mq,
{
int i;
+ if (mq->card->mqrq_ref_cnt > 1)
+ return !!mq->mqrq[0].bounce_buf;
+
for (i = 0; i < mq->qdepth; i++) {
mq->mqrq[i].bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
if (!mq->mqrq[i].bounce_buf)
@@ -234,6 +244,9 @@ static int mmc_queue_alloc_bounce_sgs(struct mmc_queue *mq,
{
int i, ret;
+ if (mq->card->mqrq_ref_cnt > 1)
+ return 0;
+
for (i = 0; i < mq->qdepth; i++) {
mq->mqrq[i].sg = mmc_alloc_sg(1, &ret);
if (ret)
@@ -251,6 +264,9 @@ static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
{
int i, ret;
+ if (mq->card->mqrq_ref_cnt > 1)
+ return 0;
+
for (i = 0; i < mq->qdepth; i++) {
mq->mqrq[i].sg = mmc_alloc_sg(max_segs, &ret);
if (ret)
@@ -280,6 +296,19 @@ static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
mmc_queue_req_free_bufs(&mq->mqrq[i]);
}
+static void mmc_queue_free_mqrqs(struct mmc_queue *mq)
+{
+ if (!mq->mqrq)
+ return;
+
+ if (!--mq->card->mqrq_ref_cnt) {
+ mmc_queue_reqs_free_bufs(mq);
+ kfree(mq->card->mqrq);
+ mq->card->mqrq = NULL;
+ }
+ mq->mqrq = NULL;
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
@@ -370,9 +399,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
return 0;
cleanup_queue:
- mmc_queue_reqs_free_bufs(mq);
- kfree(mq->mqrq);
- mq->mqrq = NULL;
+ mmc_queue_free_mqrqs(mq);
blk_cleanup:
blk_cleanup_queue(mq->queue);
return ret;
@@ -395,9 +422,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- mmc_queue_reqs_free_bufs(mq);
- kfree(mq->mqrq);
- mq->mqrq = NULL;
+ mmc_queue_free_mqrqs(mq);
mq->card = NULL;
}
@@ -413,6 +438,9 @@ int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
if (mq->qdepth != 2)
return -EINVAL;
+ if (mqrq_cur->packed)
+ goto out;
+
mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
if (!mqrq_cur->packed) {
pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
@@ -443,6 +471,9 @@ void mmc_packed_clean(struct mmc_queue *mq)
struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+ if (mq->card->mqrq_ref_cnt > 1)
+ return;
+
kfree(mqrq_cur->packed);
mqrq_cur->packed = NULL;
kfree(mqrq_prev->packed);
@@ -207,6 +207,7 @@ struct mmc_host;
struct mmc_ios;
struct sdio_func;
struct sdio_func_tuple;
+struct mmc_queue_req;
#define SDIO_MAX_FUNCS 7
@@ -319,6 +320,9 @@ struct mmc_card {
struct dentry *debugfs_root;
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
unsigned int nr_parts;
+
+ struct mmc_queue_req *mqrq; /* Shared queue structure */
+ int mqrq_ref_cnt; /* Shared queue ref. count */
};
/*
eMMC can have multiple internal partitions that are represented as separate disks / queues. However, the card has only one command queue, which must be empty when switching partitions. Consequently, the array of mmc requests that are queued can be shared between partitions, saving memory. Keep a pointer to the mmc request queue on the card, and use that instead of allocating a new one for each partition. Use a reference count to keep track of when to free it. Signed-off-by: Adrian Hunter <adrian.hunter@intel.com> --- drivers/mmc/card/queue.c | 43 +++++++++++++++++++++++++++++++++++++------ include/linux/mmc/card.h | 4 ++++ 2 files changed, 41 insertions(+), 6 deletions(-)