
[v3,9/9] mmc: core: Improve system responsiveness when polling for busy

Message ID 1527646378-134633-1-git-send-email-shawn.lin@rock-chips.com (mailing list archive)
State New, archived

Commit Message

Shawn Lin May 30, 2018, 2:12 a.m. UTC
Previously, we only throttled sending status when doing erase or
accessing RPMB. So this patch first adds a new flag to teach
mmc_poll_for_busy() to relinquish the CPU for the aforementioned
cases, since mmc_poll_for_busy() is also used in performance-critical
paths where we don't want to hurt performance.

However, busy polling can drastically hurt responsiveness if only one
online CPU is serving the system. There is an old saying, "Grasp all,
lose all", which teaches us to sacrifice a little performance to gain
responsiveness in this case.

Summary: relinquish the CPU on an MP system when doing erase or
accessing RPMB, and do it for all operations polling for busy when
only one CPU is available for scheduling.
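
The backoff itself is simple: whenever the caller asks for it, or only
one CPU is online, sleep between status checks and double the delay,
starting at 64us and capped at 32768us (a condensed sketch of the
mmc_poll_for_busy() hunk below):

	/* in the polling loop, after a status check finds the card busy */
	if (relinquish_cpu || num_online_cpus() == 1) {
		usleep_range(loop_udelay, loop_udelay << 1);
		if (loop_udelay < udelay_max)
			loop_udelay <<= 1;
	}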

Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>

---

Changes in v3: None
Changes in v2: None

 drivers/mmc/core/block.c   |  7 ++++---
 drivers/mmc/core/core.c    |  2 +-
 drivers/mmc/core/mmc_ops.c | 25 +++++++++++++++++++++----
 drivers/mmc/core/mmc_ops.h |  2 +-
 4 files changed, 27 insertions(+), 9 deletions(-)

Comments

Ulf Hansson July 2, 2018, 1:17 p.m. UTC | #1
On 30 May 2018 at 04:12, Shawn Lin <shawn.lin@rock-chips.com> wrote:
> Previously, we only throttled sending status when doing erase or
> accessing RPMB. So this patch first adds a new flag to teach
> mmc_poll_for_busy() to relinquish the CPU for the aforementioned
> cases, since mmc_poll_for_busy() is also used in performance-critical
> paths where we don't want to hurt performance.
>
> However, busy polling can drastically hurt responsiveness if only one
> online CPU is serving the system. There is an old saying, "Grasp all,
> lose all", which teaches us to sacrifice a little performance to gain
> responsiveness in this case.
>
> Summary: relinquish the CPU on an MP system when doing erase or
> accessing RPMB, and do it for all operations polling for busy when
> only one CPU is available for scheduling.

This makes sense. However, please move this patch prior to patch 6
(the erase changes), as otherwise you would introduce a regression in
behavior between patch 6 and $subject patch.

Perhaps the changelog should even mention that the adaptive polling
behavior is inherited from the similar approach already taken by the
erase polling scheme.

>
> Signed-off-by: Shawn Lin <shawn.lin@rock-chips.com>
>

[...]

>  int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
>                       bool send_status, bool retry_crc_err, bool use_r1b_resp,
>                       u32 *resp_status, bool check_busy(u32 device_status),
> -                     bool poll_for_switch)
> +                     bool poll_for_switch, bool relinquish_cpu)

Maybe "adaptive_poll", give a better description of the use of the parameter!?

>  {
>         struct mmc_host *host = card->host;
> -       int err;
> +       int err, loop_udelay=64, udelay_max=32768;
>         unsigned long timeout;
>         u32 status = 0;
>         bool expired = false;
> @@ -519,7 +519,24 @@ int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
>                                 mmc_hostname(host), __func__);
>                         return -ETIMEDOUT;
>                 }
> -       } while (busy);
> +
> +               if (!busy)
> +                       break;
> +
> +               /*
> +                * Relinquish CPU unconditionally if the caller wants.
> +                * Or try to do that if only one CPU is online for sched
> +                * which is based on the strategy to balance response time
> +                * with turnaround time.
> +                */
> +               if (unlikely(relinquish_cpu || num_online_cpus() == 1)) {
> +                       usleep_range(loop_udelay, loop_udelay << 1);
> +                       if (loop_udelay < udelay_max)
> +                               loop_udelay <<= 1;
> +               }
> +
> +
> +       } while (1);
>
>         return 0;
>  }
> @@ -598,7 +615,7 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
>         /* Let's try to poll to find out when the command is completed. */
>         err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
>                                 use_r1b_resp, NULL, &mmc_switch_in_prg_state,
> -                               true);
> +                               true, false);
>         if (err)
>                 goto out;
>
> diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
> index 8830c1d..4a30a81 100644
> --- a/drivers/mmc/core/mmc_ops.h
> +++ b/drivers/mmc/core/mmc_ops.h
> @@ -36,7 +36,7 @@
>  int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
>                       bool send_status, bool retry_crc_err, bool use_r1b_resp,
>                       u32 *resp_status, bool check_busy(u32 device_status),
> -                     bool poll_for_switch);
> +                     bool poll_for_switch, bool relinquish_cpu);
>  int mmc_switch_status(struct mmc_card *card);
>  int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
>  int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
> --
> 1.9.1
>
>

Kind regards
Uffe

Patch

diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 4186bc2..da19e0d 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -586,7 +586,8 @@  static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md,
 		 * "Send Status".
 		 */
 		err = mmc_poll_for_busy(card, 500, true, false, false, NULL,
-					&mmc_blk_rpmb_in_busy_state, false);
+					&mmc_blk_rpmb_in_busy_state, false,
+					true);
 		if (err)
 			/* Convert to -EPERM to avoid possible ABI regression */
 			err = -EPERM;
@@ -1600,7 +1601,7 @@  static int mmc_blk_fix_state(struct mmc_card *card, struct request *req)
 	mmc_blk_send_stop(card, timeout);
 
 	err = mmc_poll_for_busy(card, timeout, true, false, false, NULL,
-				&mmc_blk_in_busy_state, false);
+				&mmc_blk_in_busy_state, false, false);
 
 
 	mmc_retune_release(card->host);
@@ -1826,7 +1827,7 @@  static int mmc_blk_card_busy(struct mmc_card *card, struct request *req)
 		return 0;
 
 	err = mmc_poll_for_busy(card, MMC_BLK_TIMEOUT_MS, true, false, false,
-				&status, &mmc_blk_in_busy_state, false);
+				&status, &mmc_blk_in_busy_state, false, false);
 
 	/*
 	 * Do not assume data transferred correctly if there are any error bits
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index d10df43..722150d 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2065,7 +2065,7 @@  static int mmc_do_erase(struct mmc_card *card, unsigned int from,
 	}
 
 	err = mmc_poll_for_busy(card, busy_timeout, true, false, use_r1b_resp,
-				NULL, &mmc_erase_in_busy_state, false);
+				NULL, &mmc_erase_in_busy_state, false, true);
 out:
 	mmc_retune_release(card->host);
 	return err;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 9ff25c6..81e3800 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -454,10 +454,10 @@  int mmc_switch_status(struct mmc_card *card)
 int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
 		      bool send_status, bool retry_crc_err, bool use_r1b_resp,
 		      u32 *resp_status, bool check_busy(u32 device_status),
-		      bool poll_for_switch)
+		      bool poll_for_switch, bool relinquish_cpu)
 {
 	struct mmc_host *host = card->host;
-	int err;
+	int err, loop_udelay=64, udelay_max=32768;
 	unsigned long timeout;
 	u32 status = 0;
 	bool expired = false;
@@ -519,7 +519,24 @@  int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
 				mmc_hostname(host), __func__);
 			return -ETIMEDOUT;
 		}
-	} while (busy);
+
+		if (!busy)
+			break;
+
+		/*
+		 * Relinquish CPU unconditionally if the caller wants.
+		 * Or try to do that if only one CPU is online for sched
+		 * which is based on the strategy to balance response time
+		 * with turnaround time.
+		 */
+		if (unlikely(relinquish_cpu || num_online_cpus() == 1)) {
+			usleep_range(loop_udelay, loop_udelay << 1);
+			if (loop_udelay < udelay_max)
+				loop_udelay <<= 1;
+		}
+
+
+	} while (1);
 
 	return 0;
 }
@@ -598,7 +615,7 @@  int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
 	/* Let's try to poll to find out when the command is completed. */
 	err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err,
 				use_r1b_resp, NULL, &mmc_switch_in_prg_state,
-				true);
+				true, false);
 	if (err)
 		goto out;
 
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 8830c1d..4a30a81 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -36,7 +36,7 @@ 
 int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
 		      bool send_status, bool retry_crc_err, bool use_r1b_resp,
 		      u32 *resp_status, bool check_busy(u32 device_status),
-		      bool poll_for_switch);
+		      bool poll_for_switch, bool relinquish_cpu);
 int mmc_switch_status(struct mmc_card *card);
 int __mmc_switch_status(struct mmc_card *card, bool crc_err_fatal);
 int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,