Message ID | 1702276972-41296-2-git-send-email-quic_qianyu@quicinc.com (mailing list archive) |
---|---|
State | Not Applicable |
Series | bus: mhi: host: Add lock to avoid race when ringing channel DB |
On 12/10/2023 11:42 PM, Qiang Yu wrote:
> From: Bhaumik Bhatt <bbhatt@codeaurora.org>
>
> Protect WP accesses such that multiple threads queueing buffers for
> incoming data do not race.
>
> Meanwhile, if CONFIG_TRACE_IRQFLAGS is enabled, irq will be enabled once
> __local_bh_enable_ip is called as part of write_unlock_bh. Hence, let's
> take irqsave lock after TRE is generated to avoid running write_unlock_bh
> when irqsave lock is held.
>
> Cc: <stable@vger.kernel.org>
> Fixes: 189ff97cca53 ("bus: mhi: core: Add support for data transfer")
> Signed-off-by: Bhaumik Bhatt <bbhatt@codeaurora.org>
> Signed-off-by: Qiang Yu <quic_qianyu@quicinc.com>

Seems to work fine for AIC100

Reviewed-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
Tested-by: Jeffrey Hugo <quic_jhugo@quicinc.com>
On Mon, Dec 11, 2023 at 02:42:51PM +0800, Qiang Yu wrote:
> From: Bhaumik Bhatt <bbhatt@codeaurora.org>
>
> Protect WP accesses such that multiple threads queueing buffers for
> incoming data do not race.
>
> Meanwhile, if CONFIG_TRACE_IRQFLAGS is enabled, irq will be enabled once
> __local_bh_enable_ip is called as part of write_unlock_bh. Hence, let's
> take irqsave lock after TRE is generated to avoid running write_unlock_bh
> when irqsave lock is held.
>
> Cc: <stable@vger.kernel.org>
> Fixes: 189ff97cca53 ("bus: mhi: core: Add support for data transfer")
> Signed-off-by: Bhaumik Bhatt <bbhatt@codeaurora.org>
> Signed-off-by: Qiang Yu <quic_qianyu@quicinc.com>

Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>

- Mani

> ---
>  drivers/bus/mhi/host/main.c | 22 +++++++++++++---------
>  1 file changed, 13 insertions(+), 9 deletions(-)
>
> diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
> index dcf627b..32021fe 100644
> --- a/drivers/bus/mhi/host/main.c
> +++ b/drivers/bus/mhi/host/main.c
> @@ -1122,17 +1122,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
>  	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
>  		return -EIO;
>
> -	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
> -
>  	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
> -	if (unlikely(ret)) {
> -		ret = -EAGAIN;
> -		goto exit_unlock;
> -	}
> +	if (unlikely(ret))
> +		return -EAGAIN;
>
>  	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
>  	if (unlikely(ret))
> -		goto exit_unlock;
> +		return ret;
> +
> +	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
>
>  	/* Packet is queued, take a usage ref to exit M3 if necessary
>  	 * for host->device buffer, balanced put is done on buffer completion
> @@ -1152,7 +1150,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
>  	if (dir == DMA_FROM_DEVICE)
>  		mhi_cntrl->runtime_put(mhi_cntrl);
>
> -exit_unlock:
>  	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
>
>  	return ret;
> @@ -1204,6 +1201,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
>  	int eot, eob, chain, bei;
>  	int ret;
>
> +	/* Protect accesses for reading and incrementing WP */
> +	write_lock_bh(&mhi_chan->lock);
> +
>  	buf_ring = &mhi_chan->buf_ring;
>  	tre_ring = &mhi_chan->tre_ring;
>
> @@ -1221,8 +1221,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
>
>  	if (!info->pre_mapped) {
>  		ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
> -		if (ret)
> +		if (ret) {
> +			write_unlock_bh(&mhi_chan->lock);
>  			return ret;
> +		}
>  	}
>
>  	eob = !!(flags & MHI_EOB);
> @@ -1239,6 +1241,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
>  	mhi_add_ring_element(mhi_cntrl, tre_ring);
>  	mhi_add_ring_element(mhi_cntrl, buf_ring);
>
> +	write_unlock_bh(&mhi_chan->lock);
> +
>  	return 0;
>  }
>
> --
> 2.7.4
>
>
diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c
index dcf627b..32021fe 100644
--- a/drivers/bus/mhi/host/main.c
+++ b/drivers/bus/mhi/host/main.c
@@ -1122,17 +1122,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
 		return -EIO;

-	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
 	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
-	if (unlikely(ret)) {
-		ret = -EAGAIN;
-		goto exit_unlock;
-	}
+	if (unlikely(ret))
+		return -EAGAIN;

 	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
 	if (unlikely(ret))
-		goto exit_unlock;
+		return ret;
+
+	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);

 	/* Packet is queued, take a usage ref to exit M3 if necessary
 	 * for host->device buffer, balanced put is done on buffer completion
@@ -1152,7 +1150,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
 	if (dir == DMA_FROM_DEVICE)
 		mhi_cntrl->runtime_put(mhi_cntrl);

-exit_unlock:
 	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);

 	return ret;
@@ -1204,6 +1201,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 	int eot, eob, chain, bei;
 	int ret;

+	/* Protect accesses for reading and incrementing WP */
+	write_lock_bh(&mhi_chan->lock);
+
 	buf_ring = &mhi_chan->buf_ring;
 	tre_ring = &mhi_chan->tre_ring;

@@ -1221,8 +1221,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,

 	if (!info->pre_mapped) {
 		ret = mhi_cntrl->map_single(mhi_cntrl, buf_info);
-		if (ret)
+		if (ret) {
+			write_unlock_bh(&mhi_chan->lock);
 			return ret;
+		}
 	}

 	eob = !!(flags & MHI_EOB);
@@ -1239,6 +1241,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 	mhi_add_ring_element(mhi_cntrl, tre_ring);
 	mhi_add_ring_element(mhi_cntrl, buf_ring);

+	write_unlock_bh(&mhi_chan->lock);
+
 	return 0;
 }
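For context, the ordering constraint described in the commit message can be illustrated with a minimal, hypothetical sketch; this is not the in-tree code, and the lock names lock_a and lock_b are placeholders standing in for mhi_cntrl->pm_lock and mhi_chan->lock. As the commit message notes, write_unlock_bh() ends in __local_bh_enable_ip(), which under CONFIG_TRACE_IRQFLAGS re-enables interrupts, so it must not run while an irqsave lock is held:

/*
 * Minimal sketch of the lock ordering issue, not the in-tree code.
 * lock_a stands in for mhi_cntrl->pm_lock, lock_b for mhi_chan->lock.
 */
#include <linux/spinlock.h>

static DEFINE_RWLOCK(lock_a);
static DEFINE_RWLOCK(lock_b);

static void broken_order(void)
{
	unsigned long flags;

	read_lock_irqsave(&lock_a, flags);	/* interrupts disabled */
	write_lock_bh(&lock_b);
	/* ... generate ring element, advance WP ... */
	write_unlock_bh(&lock_b);		/* __local_bh_enable_ip() re-enables
						 * IRQs under CONFIG_TRACE_IRQFLAGS,
						 * defeating the irqsave above */
	read_unlock_irqrestore(&lock_a, flags);
}

static void fixed_order(void)
{
	unsigned long flags;

	write_lock_bh(&lock_b);			/* serialize WP readers/updaters */
	/* ... generate ring element, advance WP ... */
	write_unlock_bh(&lock_b);

	read_lock_irqsave(&lock_a, flags);	/* only now disable interrupts */
	/* ... take runtime PM ref, ring the channel doorbell ... */
	read_unlock_irqrestore(&lock_a, flags);
}

This mirrors what the patch does: mhi_gen_tre() now takes and releases mhi_chan->lock with the _bh variants before mhi_queue() acquires pm_lock with read_lock_irqsave(), so the bottom-half unlock never runs inside the irqsave section.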