Message ID | 20240603164354.79035-1-manivannan.sadhasivam@linaro.org (mailing list archive)
---|---
State | Not Applicable
Series | bus: mhi: ep: Do not allocate memory for MHI objects from DMA zone
On 6/3/2024 9:43 AM, Manivannan Sadhasivam wrote:
> MHI endpoint stack accidentally started allocating memory for objects from
> DMA zone since commit 62210a26cd4f ("bus: mhi: ep: Use slab allocator

Going through the mentioned commit, it seems that using GFP_DMA/SLAB was intentional.

> where applicable"). But there is no real need to allocate memory from this
> naturally limited DMA zone. This also causes the MHI endpoint stack to run
> out of memory while doing high bandwidth transfers.
>
> So let's switch over to normal memory.
>
> Cc: stable@vger.kernel.org # 6.8
> Fixes: 62210a26cd4f ("bus: mhi: ep: Use slab allocator where applicable")
> Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
> ---
>  drivers/bus/mhi/ep/main.c | 14 +++++++-------
>  1 file changed, 7 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
> index f8f674adf1d4..4acfac73ca9a 100644
> --- a/drivers/bus/mhi/ep/main.c
> +++ b/drivers/bus/mhi/ep/main.c
> @@ -90,7 +90,7 @@ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct m
>  	struct mhi_ring_element *event;
>  	int ret;
>
> -	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
> +	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
>  	if (!event)
>  		return -ENOMEM;
>
> @@ -109,7 +109,7 @@ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_stat
>  	struct mhi_ring_element *event;
>  	int ret;
>
> -	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
> +	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
>  	if (!event)
>  		return -ENOMEM;
>
> @@ -127,7 +127,7 @@ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_e
>  	struct mhi_ring_element *event;
>  	int ret;
>
> -	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
> +	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
>  	if (!event)
>  		return -ENOMEM;
>
> @@ -146,7 +146,7 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e
>  	struct mhi_ring_element *event;
>  	int ret;
>
> -	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
> +	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
>  	if (!event)
>  		return -ENOMEM;
>
> @@ -438,7 +438,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
>  	read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
>  	write_offset = len - buf_left;
>
> -	buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
> +	buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
>  	if (!buf_addr)
>  		return -ENOMEM;
>
> @@ -1481,14 +1481,14 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
>
>  	mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
>  							sizeof(struct mhi_ring_element), 0,
> -							SLAB_CACHE_DMA, NULL);
> +							0, NULL);
>  	if (!mhi_cntrl->ev_ring_el_cache) {
>  		ret = -ENOMEM;
>  		goto err_free_cmd;
>  	}
>
>  	mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
> -						     SLAB_CACHE_DMA, NULL);
> +						     0, NULL);
>  	if (!mhi_cntrl->tre_buf_cache) {
>  		ret = -ENOMEM;
>  		goto err_destroy_ev_ring_el_cache;

Reviewed-by: Mayank Rana <quic_mrana@quicinc.com>
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index f8f674adf1d4..4acfac73ca9a 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -90,7 +90,7 @@ static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct m
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
@@ -109,7 +109,7 @@ int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_stat
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
@@ -127,7 +127,7 @@ int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_e
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
@@ -146,7 +146,7 @@ static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_e
 	struct mhi_ring_element *event;
 	int ret;
 
-	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA);
+	event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL);
 	if (!event)
 		return -ENOMEM;
 
@@ -438,7 +438,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl,
 	read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left;
 	write_offset = len - buf_left;
 
-	buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA);
+	buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL);
 	if (!buf_addr)
 		return -ENOMEM;
 
@@ -1481,14 +1481,14 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
 
 	mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
 							sizeof(struct mhi_ring_element), 0,
-							SLAB_CACHE_DMA, NULL);
+							0, NULL);
 	if (!mhi_cntrl->ev_ring_el_cache) {
 		ret = -ENOMEM;
 		goto err_free_cmd;
 	}
 
 	mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0,
-						     SLAB_CACHE_DMA, NULL);
+						     0, NULL);
 	if (!mhi_cntrl->tre_buf_cache) {
 		ret = -ENOMEM;
 		goto err_destroy_ev_ring_el_cache;
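For context on the lines being removed, here is a minimal, illustrative sketch of the pre-patch allocation pattern that the Fixes: commit introduced. It is not the driver source; the `old_pattern_*` names and the `mtu` parameter are hypothetical stand-ins. SLAB_CACHE_DMA and GFP_DMA pin every object to ZONE_DMA, a deliberately small slice of low memory (historically the first 16 MB on x86), which is easy to exhaust under sustained high-bandwidth transfers:

```c
/*
 * Illustrative sketch only (not the driver source): the pre-patch pattern.
 * The function names and the 'mtu' parameter are hypothetical.
 */
#include <linux/slab.h>

static struct kmem_cache *tre_buf_cache;

static int old_pattern_init(unsigned int mtu)
{
	/* SLAB_CACHE_DMA backs the whole cache with ZONE_DMA pages */
	tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", mtu, 0,
					  SLAB_CACHE_DMA, NULL);
	if (!tre_buf_cache)
		return -ENOMEM;

	return 0;
}

static void *old_pattern_alloc(void)
{
	/*
	 * GFP_KERNEL | GFP_DMA: every TRE buffer competes for the same
	 * small DMA zone, so sustained transfers can eventually fail with
	 * -ENOMEM even though plenty of normal memory is still free.
	 */
	return kmem_cache_zalloc(tre_buf_cache, GFP_KERNEL | GFP_DMA);
}
```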
MHI endpoint stack accidentally started allocating memory for objects from
DMA zone since commit 62210a26cd4f ("bus: mhi: ep: Use slab allocator where
applicable"). But there is no real need to allocate memory from this
naturally limited DMA zone. This also causes the MHI endpoint stack to run
out of memory while doing high bandwidth transfers.

So let's switch over to normal memory.

Cc: stable@vger.kernel.org # 6.8
Fixes: 62210a26cd4f ("bus: mhi: ep: Use slab allocator where applicable")
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
---
 drivers/bus/mhi/ep/main.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)
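And a matching sketch of the post-patch pattern the commit message above argues for: the caches are created with no SLAB flags and objects are allocated with plain GFP_KERNEL, so the allocations can be satisfied from any zone. Again, this is a simplified illustration rather than the driver itself; the `sketch_*` names and the stand-in ring-element struct are invented here, and error handling is trimmed.

```c
/*
 * Simplified sketch of the post-patch allocation pattern; names prefixed
 * with "sketch" are hypothetical stand-ins, not the real driver symbols.
 */
#include <linux/types.h>
#include <linux/slab.h>

struct sketch_ring_element {		/* stand-in for struct mhi_ring_element */
	__le64 ptr;
	__le32 dword[2];
};

static struct kmem_cache *ev_ring_el_cache;

static int sketch_init(void)
{
	/* flags == 0: no ZONE_DMA restriction on the backing pages */
	ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el",
					     sizeof(struct sketch_ring_element),
					     0, 0, NULL);
	return ev_ring_el_cache ? 0 : -ENOMEM;
}

static int sketch_send_event(void)
{
	struct sketch_ring_element *event;

	/* Plain GFP_KERNEL: the allocation may come from normal memory */
	event = kmem_cache_zalloc(ev_ring_el_cache, GFP_KERNEL);
	if (!event)
		return -ENOMEM;

	/* ... fill in the element and hand it off to the host ... */

	kmem_cache_free(ev_ring_el_cache, event);
	return 0;
}

static void sketch_exit(void)
{
	kmem_cache_destroy(ev_ring_el_cache);
}
```

Per the commit message, these objects have no real need to live in the limited DMA zone, so regular GFP_KERNEL allocations are sufficient and avoid the out-of-memory failures seen during high-bandwidth transfers.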