Message ID | 20210420055306.4858-2-avri.altman@wdc.com (mailing list archive) |
---|---|
State | New, archived |
Series | Do not flush cache when it is disabled |
On 20/04/21 8:53 am, Avri Altman wrote:
> The cache may be flushed to the nonvolatile storage by writing to the
> FLUSH_CACHE byte (EXT_CSD byte [32]). When in command queueing mode, the
> cache may be flushed by issuing a CMDQ_TASK_DEV_MGMT (CMD48) with a
> FLUSH_CACHE op-code. Either way, verify that the cache function is
> turned ON before doing so.
>
> Fixes: 1e8e55b67030 ("mmc: block: Add CQE support")
>
> Reported-by: Brendan Peter <bpeter@lytx.com>
> Tested-by: Brendan Peter <bpeter@lytx.com>
> Signed-off-by: Avri Altman <avri.altman@wdc.com>
> ---
>  drivers/mmc/core/block.c   | 7 +++++++
>  drivers/mmc/core/mmc.c     | 2 +-
>  drivers/mmc/core/mmc_ops.h | 5 +++++
>  3 files changed, 13 insertions(+), 1 deletion(-)
>
> diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
> index 8bfd4d95b386..5b6501fc9fb7 100644
> --- a/drivers/mmc/core/block.c
> +++ b/drivers/mmc/core/block.c
> @@ -1476,6 +1476,11 @@ static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
>  	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
>  	struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
>
> +	if (mmc_card_mmc(mq->card) && !mmc_flush_allowed(mq->card)) {
> +		blk_mq_end_request(req, BLK_STS_OK);
> +		return -EPERM;
> +	}
> +
>  	mrq->cmd->opcode = MMC_SWITCH;
>  	mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
>  			(EXT_CSD_FLUSH_CACHE << 16) |
> @@ -2226,6 +2231,8 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
>  	switch (req_op(req)) {
>  	case REQ_OP_FLUSH:
>  		ret = mmc_blk_cqe_issue_flush(mq, req);
> +		if (ret == -EPERM)
> +			return MMC_REQ_FINISHED;

Using an error code for this case seems a little fragile.

How about something like:

	case REQ_OP_FLUSH:
		if (mmc_blk_cache_disabled(mq->card)) {
			blk_mq_end_request(req, BLK_STS_OK);
			return MMC_REQ_FINISHED;
		}
		ret = mmc_blk_cqe_issue_flush(mq, req);


static bool mmc_blk_cache_disabled(struct mmc_card *card)
{
	return mmc_card_mmc(card) && !mmc_flush_allowed(card);
}

>  		break;
>  	case REQ_OP_READ:
>  	case REQ_OP_WRITE:
> diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
> index 9ad4aa537867..e3da62ffcb5e 100644
> --- a/drivers/mmc/core/mmc.c
> +++ b/drivers/mmc/core/mmc.c
> @@ -2037,7 +2037,7 @@ static int _mmc_flush_cache(struct mmc_card *card)
>  {
>  	int err = 0;
>
> -	if (card->ext_csd.cache_size > 0 && card->ext_csd.cache_ctrl & 1) {
> +	if (mmc_flush_allowed(card)) {
>  		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
>  				 EXT_CSD_FLUSH_CACHE, 1,
>  				 CACHE_FLUSH_TIMEOUT_MS);
> diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
> index 5782fdf4e8e9..2682bf66708a 100644
> --- a/drivers/mmc/core/mmc_ops.h
> +++ b/drivers/mmc/core/mmc_ops.h
> @@ -19,6 +19,11 @@ enum mmc_busy_cmd {
>  struct mmc_host;
>  struct mmc_card;
>
> +static inline bool mmc_flush_allowed(struct mmc_card *card)
> +{
> +	return card->ext_csd.cache_size > 0 && card->ext_csd.cache_ctrl & 1;
> +}
> +
>  int mmc_select_card(struct mmc_card *card);
>  int mmc_deselect_cards(struct mmc_host *host);
>  int mmc_set_dsr(struct mmc_host *host);
>
> On 20/04/21 8:53 am, Avri Altman wrote:
> > The cache may be flushed to the nonvolatile storage by writing to the
> > FLUSH_CACHE byte (EXT_CSD byte [32]). When in command queueing mode, the
> > cache may be flushed by issuing a CMDQ_TASK_DEV_MGMT (CMD48) with a
> > FLUSH_CACHE op-code. Either way, verify that the cache function is
> > turned ON before doing so.
> >
> > Fixes: 1e8e55b67030 ("mmc: block: Add CQE support")
> >
> > Reported-by: Brendan Peter <bpeter@lytx.com>
> > Tested-by: Brendan Peter <bpeter@lytx.com>
> > Signed-off-by: Avri Altman <avri.altman@wdc.com>
> > ---
> >  drivers/mmc/core/block.c   | 7 +++++++
> >  drivers/mmc/core/mmc.c     | 2 +-
> >  drivers/mmc/core/mmc_ops.h | 5 +++++
> >  3 files changed, 13 insertions(+), 1 deletion(-)
> >
> > diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
> > index 8bfd4d95b386..5b6501fc9fb7 100644
> > --- a/drivers/mmc/core/block.c
> > +++ b/drivers/mmc/core/block.c
> > @@ -1476,6 +1476,11 @@ static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
> >  	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
> >  	struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
> >
> > +	if (mmc_card_mmc(mq->card) && !mmc_flush_allowed(mq->card)) {
> > +		blk_mq_end_request(req, BLK_STS_OK);
> > +		return -EPERM;
> > +	}
> > +
> >  	mrq->cmd->opcode = MMC_SWITCH;
> >  	mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
> >  			(EXT_CSD_FLUSH_CACHE << 16) |
> > @@ -2226,6 +2231,8 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
> >  	switch (req_op(req)) {
> >  	case REQ_OP_FLUSH:
> >  		ret = mmc_blk_cqe_issue_flush(mq, req);
> > +		if (ret == -EPERM)
> > +			return MMC_REQ_FINISHED;
>
> Using an error code for this case seems a little fragile.
>
> How about something like:
>
> 	case REQ_OP_FLUSH:
> 		if (mmc_blk_cache_disabled(mq->card)) {
> 			blk_mq_end_request(req, BLK_STS_OK);
> 			return MMC_REQ_FINISHED;
> 		}
> 		ret = mmc_blk_cqe_issue_flush(mq, req);
>
>
> static bool mmc_blk_cache_disabled(struct mmc_card *card)
> {
> 	return mmc_card_mmc(card) && !mmc_flush_allowed(card);
> }

Done.

Thanks,
Avri
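For reference, a minimal sketch of how the flush path could look once the suggestion above is folded in. This only illustrates the proposed shape (the helper name and its use come from the review comment); it is not the actual follow-up patch:

/* Sketch of the suggested helper; the check itself is unchanged. */
static bool mmc_blk_cache_disabled(struct mmc_card *card)
{
	/* Only eMMC exposes the EXT_CSD cache controls tested here. */
	return mmc_card_mmc(card) && !mmc_flush_allowed(card);
}

	/*
	 * In mmc_blk_mq_issue_rq(): complete the flush in place when the
	 * cache is absent or disabled, instead of threading -EPERM back
	 * out of mmc_blk_cqe_issue_flush().
	 */
	case REQ_OP_FLUSH:
		if (mmc_blk_cache_disabled(mq->card)) {
			blk_mq_end_request(req, BLK_STS_OK);
			return MMC_REQ_FINISHED;
		}
		ret = mmc_blk_cqe_issue_flush(mq, req);
		break;

This keeps the "cache is off" decision in the issuing path, so mmc_blk_cqe_issue_flush() never needs to return a sentinel error code.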
diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
index 8bfd4d95b386..5b6501fc9fb7 100644
--- a/drivers/mmc/core/block.c
+++ b/drivers/mmc/core/block.c
@@ -1476,6 +1476,11 @@ static int mmc_blk_cqe_issue_flush(struct mmc_queue *mq, struct request *req)
 	struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req);
 	struct mmc_request *mrq = mmc_blk_cqe_prep_dcmd(mqrq, req);
 
+	if (mmc_card_mmc(mq->card) && !mmc_flush_allowed(mq->card)) {
+		blk_mq_end_request(req, BLK_STS_OK);
+		return -EPERM;
+	}
+
 	mrq->cmd->opcode = MMC_SWITCH;
 	mrq->cmd->arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
 			(EXT_CSD_FLUSH_CACHE << 16) |
@@ -2226,6 +2231,8 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
 	switch (req_op(req)) {
 	case REQ_OP_FLUSH:
 		ret = mmc_blk_cqe_issue_flush(mq, req);
+		if (ret == -EPERM)
+			return MMC_REQ_FINISHED;
 		break;
 	case REQ_OP_READ:
 	case REQ_OP_WRITE:
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 9ad4aa537867..e3da62ffcb5e 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -2037,7 +2037,7 @@ static int _mmc_flush_cache(struct mmc_card *card)
 {
 	int err = 0;
 
-	if (card->ext_csd.cache_size > 0 && card->ext_csd.cache_ctrl & 1) {
+	if (mmc_flush_allowed(card)) {
 		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				 EXT_CSD_FLUSH_CACHE, 1,
 				 CACHE_FLUSH_TIMEOUT_MS);
diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h
index 5782fdf4e8e9..2682bf66708a 100644
--- a/drivers/mmc/core/mmc_ops.h
+++ b/drivers/mmc/core/mmc_ops.h
@@ -19,6 +19,11 @@ enum mmc_busy_cmd {
 struct mmc_host;
 struct mmc_card;
 
+static inline bool mmc_flush_allowed(struct mmc_card *card)
+{
+	return card->ext_csd.cache_size > 0 && card->ext_csd.cache_ctrl & 1;
+}
+
 int mmc_select_card(struct mmc_card *card);
 int mmc_deselect_cards(struct mmc_host *host);
 int mmc_set_dsr(struct mmc_host *host);
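As background for the new helper in the mmc_ops.h hunk above: a flush is only meaningful when the device reports a cache and the cache function is currently switched ON, which is what the two EXT_CSD-derived fields encode. A commented restatement of the helper, for illustration only (the comments are editorial, not part of the patch):

static inline bool mmc_flush_allowed(struct mmc_card *card)
{
	return card->ext_csd.cache_size > 0 &&	/* the device has a cache... */
	       card->ext_csd.cache_ctrl & 1;	/* ...and it is turned ON */
}

When this returns false there is nothing volatile to persist, so both callers treat the flush as a successful no-op instead of issuing the MMC_SWITCH (or CMD48) flush.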