Message ID | 1581324597-31031-9-git-send-email-haibo.chen@nxp.com (mailing list archive)
---|---
State | New, archived
Series | few fix for sdhci-esdhc-imx
On 10/02/20 10:49 am, haibo.chen@nxp.com wrote:
> From: Haibo Chen <haibo.chen@nxp.com>
>
> To make dma_set_max_seg_size() work, dev->dma_parms needs to be
> created first.
>
> This issue was found on the i.MX8QM MEK board: the platform
> configures max_segment_size to 65535, but dma_set_max_seg_size()
> does not actually take effect, so segments of 65536 bytes are
> sometimes produced, exceeding the hardware's maximum segment
> limitation and triggering an ADMA error.
>
> Signed-off-by: Haibo Chen <haibo.chen@nxp.com>
> ---
>  drivers/mmc/core/queue.c | 9 +++++----
>  1 file changed, 5 insertions(+), 4 deletions(-)
>
> diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
> index 9edc08685e86..f74c28c58482 100644
> --- a/drivers/mmc/core/queue.c
> +++ b/drivers/mmc/core/queue.c
> @@ -359,6 +359,7 @@ static const struct blk_mq_ops mmc_mq_ops = {
>  static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
>  {
>  	struct mmc_host *host = card->host;
> +	struct device *dev = mmc_dev(host);
>  	unsigned block_size = 512;
>
>  	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
> @@ -366,13 +367,12 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
>  	if (mmc_can_erase(card))
>  		mmc_queue_setup_discard(mq->queue, card);
>
> -	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
> +	if (!dev->dma_mask || !*dev->dma_mask)
>  		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
>  	blk_queue_max_hw_sectors(mq->queue,
>  		min(host->max_blk_count, host->max_req_size / 512));
>  	if (host->can_dma_map_merge)
> -		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
> -							mmc_dev(host)),
> +		WARN(!blk_queue_can_use_dma_map_merging(mq->queue, dev),
>  		     "merging was advertised but not possible");
>  	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
>
> @@ -389,7 +389,8 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
>  	blk_queue_max_segment_size(mq->queue,
>  			round_down(host->max_seg_size, block_size));
>
> -	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
> +	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);

Wouldn't it be more logical to keep the existing dma_parms? i.e.

	if (!dev->dma_parms)
		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
					      GFP_KERNEL);

> +	dma_set_max_seg_size(dev, queue_max_segment_size(mq->queue));
>
>  	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
>  	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
>
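The failure mode is subtle: dma_set_max_seg_size() fails silently when dev->dma_parms is NULL, and dma_get_max_seg_size() then falls back to a 64 KiB default, which is exactly the 65536-byte segment the commit message reports. For reference, a sketch of the two helpers, approximately as they appear in include/linux/dma-mapping.h around this kernel version (check your tree for the exact code):

	static inline unsigned int dma_get_max_seg_size(struct device *dev)
	{
		if (dev->dma_parms && dev->dma_parms->max_segment_size)
			return dev->dma_parms->max_segment_size;
		return SZ_64K;	/* 65536: the fallback behind the ADMA error */
	}

	static inline int dma_set_max_seg_size(struct device *dev,
					       unsigned int size)
	{
		if (dev->dma_parms) {
			dev->dma_parms->max_segment_size = size;
			return 0;
		}
		return -EIO;	/* ignored by callers that do not check it */
	}

Without dma_parms, the 65535 limit requested by the host driver never reaches the DMA layer, so scatterlist merging can still produce 65536-byte segments.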
> -----Original Message-----
> From: linux-mmc-owner@vger.kernel.org
> <linux-mmc-owner@vger.kernel.org> On Behalf Of Adrian Hunter
> Sent: 18 February 2020 16:16
> To: BOUGH CHEN <haibo.chen@nxp.com>; ulf.hansson@linaro.org;
> linux-mmc@vger.kernel.org
> Cc: dl-linux-imx <linux-imx@nxp.com>; linus.walleij@linaro.org
> Subject: Re: [PATCH v3 14/14] mmc: queue: create dev->dma_parms before
> call dma_set_max_seg_size()
>
> On 10/02/20 10:49 am, haibo.chen@nxp.com wrote:
> > From: Haibo Chen <haibo.chen@nxp.com>
> >
> > To make dma_set_max_seg_size() work, dev->dma_parms needs to be
> > created first.
> >
> > This issue was found on the i.MX8QM MEK board: the platform
> > configures max_segment_size to 65535, but dma_set_max_seg_size()
> > does not actually take effect, so segments of 65536 bytes are
> > sometimes produced, exceeding the hardware's maximum segment
> > limitation and triggering an ADMA error.
> >
> > Signed-off-by: Haibo Chen <haibo.chen@nxp.com>
> > ---
> >  drivers/mmc/core/queue.c | 9 +++++----
> >  1 file changed, 5 insertions(+), 4 deletions(-)
> >
> > diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
> > index 9edc08685e86..f74c28c58482 100644
> > --- a/drivers/mmc/core/queue.c
> > +++ b/drivers/mmc/core/queue.c
> > @@ -359,6 +359,7 @@ static const struct blk_mq_ops mmc_mq_ops = {
> >  static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
> >  {
> >  	struct mmc_host *host = card->host;
> > +	struct device *dev = mmc_dev(host);
> >  	unsigned block_size = 512;
> >
> >  	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
> > @@ -366,13 +367,12 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
> >  	if (mmc_can_erase(card))
> >  		mmc_queue_setup_discard(mq->queue, card);
> >
> > -	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
> > +	if (!dev->dma_mask || !*dev->dma_mask)
> >  		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
> >  	blk_queue_max_hw_sectors(mq->queue,
> >  		min(host->max_blk_count, host->max_req_size / 512));
> >  	if (host->can_dma_map_merge)
> > -		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
> > -							mmc_dev(host)),
> > +		WARN(!blk_queue_can_use_dma_map_merging(mq->queue, dev),
> >  		     "merging was advertised but not possible");
> >  	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
> >
> > @@ -389,7 +389,8 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
> >  	blk_queue_max_segment_size(mq->queue,
> >  			round_down(host->max_seg_size, block_size));
> >
> > -	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
> > +	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
>
> Wouldn't it be more logical to keep the existing dma_parms? i.e.
>
> 	if (!dev->dma_parms)
> 		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
> 					      GFP_KERNEL);

Yes! I will do that.

Best Regards
Bough Chen

> > +	dma_set_max_seg_size(dev, queue_max_segment_size(mq->queue));
> >
> >  	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
> >  	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
> >
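Putting the reviewer's suggestion into the patch context gives roughly the following hunk — a minimal sketch of the agreed change, not necessarily the final merged code:

	/* Keep dma_parms if the bus or driver already allocated it, so any
	 * previously configured limits survive; otherwise allocate it with
	 * device-managed lifetime via devm_kzalloc().
	 */
	if (!dev->dma_parms)
		dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
					      GFP_KERNEL);
	dma_set_max_seg_size(dev, queue_max_segment_size(mq->queue));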
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index 9edc08685e86..f74c28c58482 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -359,6 +359,7 @@ static const struct blk_mq_ops mmc_mq_ops = {
 static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
+	struct device *dev = mmc_dev(host);
 	unsigned block_size = 512;
 
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, mq->queue);
@@ -366,13 +367,12 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 	if (mmc_can_erase(card))
 		mmc_queue_setup_discard(mq->queue, card);
 
-	if (!mmc_dev(host)->dma_mask || !*mmc_dev(host)->dma_mask)
+	if (!dev->dma_mask || !*dev->dma_mask)
 		blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
 	blk_queue_max_hw_sectors(mq->queue,
 		min(host->max_blk_count, host->max_req_size / 512));
 	if (host->can_dma_map_merge)
-		WARN(!blk_queue_can_use_dma_map_merging(mq->queue,
-							mmc_dev(host)),
+		WARN(!blk_queue_can_use_dma_map_merging(mq->queue, dev),
 		     "merging was advertised but not possible");
 	blk_queue_max_segments(mq->queue, mmc_get_max_segments(host));
 
@@ -389,7 +389,8 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card)
 	blk_queue_max_segment_size(mq->queue,
 			round_down(host->max_seg_size, block_size));
 
-	dma_set_max_seg_size(mmc_dev(host), queue_max_segment_size(mq->queue));
+	dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL);
+	dma_set_max_seg_size(dev, queue_max_segment_size(mq->queue));
 
 	INIT_WORK(&mq->recovery_work, mmc_mq_recovery_handler);
 	INIT_WORK(&mq->complete_work, mmc_blk_mq_complete_work);
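One way this class of bug could be caught earlier is to check the return value, since dma_set_max_seg_size() returns -EIO when dma_parms is missing. An illustrative sketch only; the warning below is not part of the patch:

	/* Surface the otherwise-silent -EIO failure. */
	if (dma_set_max_seg_size(dev, queue_max_segment_size(mq->queue)))
		pr_warn("%s: failed to set DMA max segment size\n",
			mmc_hostname(host));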