diff mbox

MMC quirks relating to performance/lifetime.

Message ID AANLkTimWAnPg2sYiLbfZXAAUiu7P5T3LmNoVbPi9RTEG@mail.gmail.com (mailing list archive)
State New, archived
Headers show

Commit Message

Andrei Warkentin Feb. 18, 2011, 11:17 p.m. UTC
None
diff mbox

Patch

From b3e6a556a716e7cec86071342197e798b38c3cbf Mon Sep 17 00:00:00 2001
From: Andrei Warkentin <andreiw@motorola.com>
Date: Fri, 18 Feb 2011 17:46:00 -0600
Subject: [PATCH] MMC: Split non-page-size aligned accesses.

If the card page size is known, split the access into an unaligned
portion and an aligned portion; this helps write performance.

Change-Id: I4ad7588d613d775212fac87436e418577909a22b
Signed-off-by: Andrei Warkentin <andreiw@motorola.com>
---
 drivers/mmc/card/block.c |  111 ++++++++++++++++++++++++++++++++++++++++++++++
 include/linux/mmc/card.h |    1 +
 2 files changed, 112 insertions(+), 0 deletions(-)

diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 7054fd5..be7d739 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -22,6 +22,7 @@ 
 #include <linux/init.h>
 
 #include <linux/kernel.h>
+#include <linux/ctype.h>
 #include <linux/fs.h>
 #include <linux/slab.h>
 #include <linux/errno.h>
@@ -67,6 +68,85 @@  struct mmc_blk_data {
 
 static DEFINE_MUTEX(open_lock);
 
+static ssize_t
+show_block_attr(struct device *dev, struct device_attribute *attr,
+		char *buf);
+
+static ssize_t
+set_block_attr(struct device *dev, struct device_attribute *attr,
+	       const char *buf, size_t count);
+
+static DEVICE_ATTR(page_size, S_IRUGO | S_IWUSR,
+		   show_block_attr, set_block_attr);
+
+/* Show a per-card tunable (currently only page_size) via sysfs. */
+static ssize_t
+show_block_attr(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	unsigned int val = 0;
+	ssize_t ret = 0;
+	struct mmc_card *card = container_of(dev, struct mmc_card, dev);
+
+	mmc_claim_host(card->host);
+	if (attr == &dev_attr_page_size)
+		val = card->page_size;
+	else
+		ret = -EINVAL;
+	mmc_release_host(card->host);
+
+	if (!ret)
+		ret = sprintf(buf, "%u\n", val);
+	return ret;
+}
+
+/* Store a per-card tunable (currently only page_size) via sysfs. */
+static ssize_t
+set_block_attr(struct device *dev, struct device_attribute *attr,
+	       const char *buf, size_t count)
+{
+	ssize_t ret;
+	char *after;
+	unsigned int val, *dest = NULL;
+	struct mmc_card *card = container_of(dev, struct mmc_card, dev);
+
+	val = simple_strtoul(buf, &after, 10);
+	ret = after - buf;
+
+	/* Accept (and count) trailing whitespace, e.g. echo's newline. */
+	while (isspace(*after++))
+		ret++;
+
+	if (ret != count)
+		return -EINVAL;
+
+	if (attr == &dev_attr_page_size) {
+		/*
+		 * Must be 0 (splitting disabled) or a power of two of
+		 * at least 512 bytes, or mmc_adjust_write() could
+		 * divide by zero.
+		 */
+		if (val && (val < 512 || (val & (val - 1))))
+			return -EINVAL;
+		dest = &card->page_size;
+	} else
+		return -EINVAL;
+
+	mmc_claim_host(card->host);
+	*dest = val;
+	mmc_release_host(card->host);
+	return ret;
+}
+
+static struct attribute *capability_attrs[] = {
+	&dev_attr_page_size.attr,
+	NULL,
+};
+
+static struct attribute_group attr_group = {
+	.attrs = capability_attrs,
+};
+
 static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
 {
 	struct mmc_blk_data *md;
@@ -312,6 +381,49 @@  out:
 	return err ? 0 : 1;
 }
 
+
+/*
+ * If the write is not page-aligned, shrink it to just the unaligned
+ * leading portion; the block layer will then issue the remaining,
+ * now-aligned tail as a follow-up request.  Returns true if the
+ * request was shrunk.
+ *
+ * NOTE(review): this treats mrq->cmd->arg as a block number; for
+ * byte-addressed (non-blockaddr) cards arg is in bytes - verify the
+ * caller accounts for that.
+ */
+static bool mmc_adjust_write(struct mmc_card *card,
+			     struct mmc_request *mrq)
+{
+	unsigned int left_in_page;
+	unsigned int page_size_blocks;
+
+	if (!card->page_size)
+		return false;
+
+	/*
+	 * page_size is user-settable and may be smaller than blksz;
+	 * bail out rather than divide by zero below.
+	 */
+	page_size_blocks = card->page_size / mrq->data->blksz;
+	if (!page_size_blocks)
+		return false;
+
+	left_in_page = page_size_blocks -
+		(mrq->cmd->arg % page_size_blocks);
+
+	/* Aligned access. */
+	if (left_in_page == page_size_blocks)
+		return false;
+
+	/* Not straddling page boundary. */
+	if (mrq->data->blocks <= left_in_page)
+		return false;
+
+	mrq->data->blocks = left_in_page;
+	return true;
+}
+
 static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 {
 	struct mmc_blk_data *md = mq->data;
@@ -339,6 +440,10 @@  static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
 		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
 		brq.data.blocks = blk_rq_sectors(req);
 
+		/* Shrink writes that straddle a page so the tail is aligned. */
+		if (rq_data_dir(req) == WRITE)
+			mmc_adjust_write(card, &brq.mrq);
+
 		/*
 		 * The block layer doesn't support all sector count
 		 * restrictions, so we need to be prepared for too big
@@ -707,6 +812,12 @@  static int mmc_blk_probe(struct mmc_card *card)
 	if (err)
 		goto out;
 
+	/* NOTE(review): failing probe on a sysfs error may be too harsh;
+	 * confirm the out: label undoes the earlier setup for md. */
+	err = sysfs_create_group(&card->dev.kobj, &attr_group);
+	if (err)
+		goto out;
+
 	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
 			cap_str, sizeof(cap_str));
 	printk(KERN_INFO "%s: %s %s %s %s\n",
@@ -735,6 +844,9 @@  static void mmc_blk_remove(struct mmc_card *card)
 		/* Stop new requests from getting into the queue */
 		del_gendisk(md->disk);
 
+		/* Tear down the sysfs tunables added in mmc_blk_probe(). */
+		sysfs_remove_group(&card->dev.kobj, &attr_group);
+
 		/* Then flush out any already in there */
 		mmc_cleanup_queue(&md->queue);
 
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 6b75250..d52768a 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -123,7 +123,8 @@  struct mmc_card {
 	unsigned int		erase_size;	/* erase size in sectors */
  	unsigned int		erase_shift;	/* if erase unit is power 2 */
  	unsigned int		pref_erase;	/* in sectors */
+ 	unsigned int		page_size;	/* page size in bytes */
  	u8			erased_byte;	/* value of erased bytes */
 
 	u32			raw_cid[4];	/* raw card CID */
-- 
1.7.0.4