
wwan: core: Support slicing in port TX flow of WWAN subsystem

Message ID 20221026011540.8499-1-haozhe.chang@mediatek.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Series: wwan: core: Support slicing in port TX flow of WWAN subsystem

Checks

Context Check Description
netdev/tree_selection success Guessed tree name to be net-next
netdev/fixes_present success Fixes tag not required for -next series
netdev/subject_prefix warning Target tree name not specified in the subject
netdev/cover_letter success Single patches do not need cover letters
netdev/patch_count success Link
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 1 this patch: 1
netdev/cc_maintainers warning 3 maintainers not CCed: matthias.bgg@gmail.com linux-mediatek@lists.infradead.org linux-arm-kernel@lists.infradead.org
netdev/build_clang success Errors and warnings before: 0 this patch: 0
netdev/module_param success Was 0 now: 0
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 0 this patch: 0
netdev/checkpatch warning WARNING: line length of 81 exceeds 80 columns WARNING: line length of 87 exceeds 80 columns WARNING: line length of 88 exceeds 80 columns
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Haozhe Chang (常浩哲) Oct. 26, 2022, 1:15 a.m. UTC
From: haozhe chang <haozhe.chang@mediatek.com>

wwan_port_fops_write passes the SKB to the TX callback of the WWAN
device driver. However, the WWAN device (e.g., t7xx) may have an MTU
smaller than the SKB size, causing the TX buffer to be sliced and
copied once more in the WWAN device driver.

This patch implements the slicing in the WWAN subsystem and gives
WWAN device drivers the option to slice (by chunk) or not. By doing
so, the number of memory copies is reduced.

Meanwhile, this patch gives WWAN device drivers the option to reserve
headroom in the SKB for device-specific metadata.

Signed-off-by: haozhe chang <haozhe.chang@mediatek.com>
---
 drivers/net/wwan/t7xx/t7xx_port_wwan.c | 41 ++++++++++++-----------
 drivers/net/wwan/wwan_core.c           | 45 ++++++++++++++++++--------
 include/linux/wwan.h                   |  5 ++-
 3 files changed, 56 insertions(+), 35 deletions(-)

Comments

Loic Poulain Oct. 26, 2022, 7:28 a.m. UTC | #1
Hi Haozhe,

On Wed, 26 Oct 2022 at 03:16, <haozhe.chang@mediatek.com> wrote:
>
> From: haozhe chang <haozhe.chang@mediatek.com>
>
> wwan_port_fops_write inputs the SKB parameter to the TX callback of
> the WWAN device driver. However, the WWAN device (e.g., t7xx) may
> have an MTU less than the size of SKB, causing the TX buffer to be
> sliced and copied once more in the WWAN device driver.

The benefit of putting data in an skb is that it is easy to
manipulate, so I'm not sure why there is an additional copy in the
first place. Isn't it possible for the t7xx driver to consume the skb
progressively (without an intermediate copy), according to its own
MTU limitation?

>
> This patch implements the slicing in the WWAN subsystem and gives
> the WWAN devices driver the option to slice(by chunk) or not. By
> doing so, the additional memory copy is reduced.
>
> Meanwhile, this patch gives WWAN devices driver the option to reserve
> headroom in SKB for the device-specific metadata.
>
> Signed-off-by: haozhe chang <haozhe.chang@mediatek.com>
> ---
>  drivers/net/wwan/t7xx/t7xx_port_wwan.c | 41 ++++++++++++-----------
>  drivers/net/wwan/wwan_core.c           | 45 ++++++++++++++++++--------
>  include/linux/wwan.h                   |  5 ++-
>  3 files changed, 56 insertions(+), 35 deletions(-)
>
> diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
> index 33931bfd78fd..5e8589582121 100644
> --- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c
> +++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
> @@ -54,13 +54,12 @@ static void t7xx_port_ctrl_stop(struct wwan_port *port)
>  static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
>  {
>         struct t7xx_port *port_private = wwan_port_get_drvdata(port);
> -       size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU;
>         const struct t7xx_port_conf *port_conf;
>         struct t7xx_fsm_ctl *ctl;
>         enum md_state md_state;
> +       int ret;
>
> -       len = skb->len;
> -       if (!len || !port_private->chan_enable)
> +       if (!port_private->chan_enable)
>                 return -EINVAL;
>
>         port_conf = port_private->port_conf;
> @@ -72,33 +71,33 @@ static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
>                 return -ENODEV;
>         }
>
> -       for (offset = 0; offset < len; offset += chunk_len) {
> -               struct sk_buff *skb_ccci;
> -               int ret;
> -
> -               chunk_len = min(len - offset, txq_mtu - sizeof(struct ccci_header));
> -               skb_ccci = t7xx_port_alloc_skb(chunk_len);
> -               if (!skb_ccci)
> -                       return -ENOMEM;
> -
> -               skb_put_data(skb_ccci, skb->data + offset, chunk_len);
> -               ret = t7xx_port_send_skb(port_private, skb_ccci, 0, 0);
> -               if (ret) {
> -                       dev_kfree_skb_any(skb_ccci);
> -                       dev_err(port_private->dev, "Write error on %s port, %d\n",
> -                               port_conf->name, ret);
> -                       return ret;
> -               }
> +       ret = t7xx_port_send_skb(port_private, skb, 0, 0);
> +       if (ret) {
> +               dev_err(port_private->dev, "Write error on %s port, %d\n",
> +                       port_conf->name, ret);
> +               return ret;
>         }
> -
>         dev_kfree_skb(skb);
> +
>         return 0;
>  }
>
> +static size_t t7xx_port_get_tx_rsvd_headroom(struct wwan_port *port)
> +{
> +       return sizeof(struct ccci_header);
> +}
> +
> +static size_t t7xx_port_get_tx_chunk_len(struct wwan_port *port)
> +{
> +       return CLDMA_MTU - sizeof(struct ccci_header);
> +}
> +
>  static const struct wwan_port_ops wwan_ops = {
>         .start = t7xx_port_ctrl_start,
>         .stop = t7xx_port_ctrl_stop,
>         .tx = t7xx_port_ctrl_tx,
> +       .get_tx_rsvd_headroom = t7xx_port_get_tx_rsvd_headroom,

Can't we have a simple 'skb_headroom' or 'needed_headroom' member here?

> +       .get_tx_chunk_len = t7xx_port_get_tx_chunk_len,
>  };
>
Haozhe Chang (常浩哲) Oct. 26, 2022, 11:45 a.m. UTC | #2
On Wed, 2022-10-26 at 15:28 +0800, Loic Poulain wrote:
> Hi Haozhe,
> 
> On Wed, 26 Oct 2022 at 03:16, <haozhe.chang@mediatek.com> wrote:
> > 
> > From: haozhe chang <haozhe.chang@mediatek.com>
> > 
> > wwan_port_fops_write inputs the SKB parameter to the TX callback of
> > the WWAN device driver. However, the WWAN device (e.g., t7xx) may
> > have an MTU less than the size of SKB, causing the TX buffer to be
> > sliced and copied once more in the WWAN device driver.
> 
> The benefit of putting data in an skb is that it is easy to
> manipulate, so not sure why there is an additional copy in the first
> place. Isn't possible for the t7xx driver to consume the skb
> progressively (without intermediate copy), according to its own MTU
> limitation?
> 
The t7xx driver needs to add metadata at the head of the SKB for each
fragment, so the driver has to allocate a new buffer, copy the data
(skb_put_data), and insert the metadata.
Providing the option to slice in the common layer benefits a variety
of devices with different DMA capabilities. The patch is also
compatible with existing WWAN devices.
> > 
> > This patch implements the slicing in the WWAN subsystem and gives
> > the WWAN devices driver the option to slice(by chunk) or not. By
> > doing so, the additional memory copy is reduced.
> > 
> > Meanwhile, this patch gives WWAN devices driver the option to
> > reserve
> > headroom in SKB for the device-specific metadata.
> > 
> > Signed-off-by: haozhe chang <haozhe.chang@mediatek.com>
> > ---
> >  drivers/net/wwan/t7xx/t7xx_port_wwan.c | 41 ++++++++++++--------
> > ---
> >  drivers/net/wwan/wwan_core.c           | 45 ++++++++++++++++++--
> > ------
> >  include/linux/wwan.h                   |  5 ++-
> >  3 files changed, 56 insertions(+), 35 deletions(-)
> > 
> > diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c
> > b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
> > index 33931bfd78fd..5e8589582121 100644
> > --- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c
> > +++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
> > @@ -54,13 +54,12 @@ static void t7xx_port_ctrl_stop(struct
> > wwan_port *port)
> >  static int t7xx_port_ctrl_tx(struct wwan_port *port, struct
> > sk_buff *skb)
> >  {
> >         struct t7xx_port *port_private =
> > wwan_port_get_drvdata(port);
> > -       size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU;
> >         const struct t7xx_port_conf *port_conf;
> >         struct t7xx_fsm_ctl *ctl;
> >         enum md_state md_state;
> > +       int ret;
> > 
> > -       len = skb->len;
> > -       if (!len || !port_private->chan_enable)
> > +       if (!port_private->chan_enable)
> >                 return -EINVAL;
> > 
> >         port_conf = port_private->port_conf;
> > @@ -72,33 +71,33 @@ static int t7xx_port_ctrl_tx(struct wwan_port
> > *port, struct sk_buff *skb)
> >                 return -ENODEV;
> >         }
> > 
> > -       for (offset = 0; offset < len; offset += chunk_len) {
> > -               struct sk_buff *skb_ccci;
> > -               int ret;
> > -
> > -               chunk_len = min(len - offset, txq_mtu -
> > sizeof(struct ccci_header));
> > -               skb_ccci = t7xx_port_alloc_skb(chunk_len);
> > -               if (!skb_ccci)
> > -                       return -ENOMEM;
> > -
> > -               skb_put_data(skb_ccci, skb->data + offset,
> > chunk_len);
> > -               ret = t7xx_port_send_skb(port_private, skb_ccci, 0,
> > 0);
> > -               if (ret) {
> > -                       dev_kfree_skb_any(skb_ccci);
> > -                       dev_err(port_private->dev, "Write error on
> > %s port, %d\n",
> > -                               port_conf->name, ret);
> > -                       return ret;
> > -               }
> > +       ret = t7xx_port_send_skb(port_private, skb, 0, 0);
> > +       if (ret) {
> > +               dev_err(port_private->dev, "Write error on %s port,
> > %d\n",
> > +                       port_conf->name, ret);
> > +               return ret;
> >         }
> > -
> >         dev_kfree_skb(skb);
> > +
> >         return 0;
> >  }
> > 
> > +static size_t t7xx_port_get_tx_rsvd_headroom(struct wwan_port
> > *port)
> > +{
> > +       return sizeof(struct ccci_header);
> > +}
> > +
> > +static size_t t7xx_port_get_tx_chunk_len(struct wwan_port *port)
> > +{
> > +       return CLDMA_MTU - sizeof(struct ccci_header);
> > +}
> > +
> >  static const struct wwan_port_ops wwan_ops = {
> >         .start = t7xx_port_ctrl_start,
> >         .stop = t7xx_port_ctrl_stop,
> >         .tx = t7xx_port_ctrl_tx,
> > +       .get_tx_rsvd_headroom = t7xx_port_get_tx_rsvd_headroom,
> 
> Can't we have a simple 'skb_headroom' or 'needed_headroom' member
> here?
> 
OK, I will change it in patch v2.
> > +       .get_tx_chunk_len = t7xx_port_get_tx_chunk_len,
> >  };
> >
Loic Poulain Oct. 26, 2022, 2:27 p.m. UTC | #3
On Wed, 26 Oct 2022 at 13:45, haozhe chang <haozhe.chang@mediatek.com> wrote:
>
> On Wed, 2022-10-26 at 15:28 +0800, Loic Poulain wrote:
> > Hi Haozhe,
> >
> > On Wed, 26 Oct 2022 at 03:16, <haozhe.chang@mediatek.com> wrote:
> > >
> > > From: haozhe chang <haozhe.chang@mediatek.com>
> > >
> > > wwan_port_fops_write inputs the SKB parameter to the TX callback of
> > > the WWAN device driver. However, the WWAN device (e.g., t7xx) may
> > > have an MTU less than the size of SKB, causing the TX buffer to be
> > > sliced and copied once more in the WWAN device driver.
> >
> > The benefit of putting data in an skb is that it is easy to
> > manipulate, so not sure why there is an additional copy in the first
> > place. Isn't possible for the t7xx driver to consume the skb
> > progressively (without intermediate copy), according to its own MTU
> > limitation?
> >
> t7xx driver needs to add metadata to the SKB head for each fragment, so
> the driver has to allocate a new buffer to copy data(skb_put_data) and
> insert metadata.

Normally, once the first part (chunk) of the skb has been consumed
(skb_pull) and written to the device, it becomes part of the skb
headroom, which can then be used for prepending (skb_push) the
header (metadata) of the second chunk, and so on... right?

Just want to avoid a bunch of unnecessary copy/alloc here (a rough
sketch of this approach follows after this message).

Regards,
Loic
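
A minimal sketch of the in-place slicing described above, assuming the
core has already reserved enough headroom for the first chunk's header
(which is what the needed_headroom idea earlier in the thread would
provide). All example_* names, EXAMPLE_MTU and the header layout are
hypothetical; only the skb helpers (skb_push, skb_pull, min_t,
consume_skb) are real kernel APIs.

#include <linux/skbuff.h>

/* Hypothetical driver TX path: send one MTU-sized chunk at a time,
 * reusing the bytes freed by skb_pull() as headroom for the next
 * chunk's metadata. Not t7xx code.
 */
static int example_port_tx_sliced(struct example_port *port, struct sk_buff *skb)
{
	size_t payload_mtu = EXAMPLE_MTU - sizeof(struct example_header);

	while (skb->len) {
		size_t chunk = min_t(size_t, skb->len, payload_mtu);
		struct example_header *hdr;
		int ret;

		/* Prepend this chunk's metadata into the available headroom */
		hdr = skb_push(skb, sizeof(*hdr));
		example_fill_header(hdr, chunk);

		/* Hand header + payload chunk to the device */
		ret = example_dev_write(port, skb->data, sizeof(*hdr) + chunk);
		if (ret)
			return ret;

		/* The consumed bytes become headroom for the next iteration */
		skb_pull(skb, sizeof(*hdr) + chunk);
	}

	consume_skb(skb);
	return 0;
}

Whether this fits t7xx is exactly the point debated below: it
serializes the chunks, which conflicts with a DMA engine that wants to
see all fragments at once.
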
Haozhe Chang (常浩哲) Oct. 27, 2022, 1:19 a.m. UTC | #4
On Wed, 2022-10-26 at 22:27 +0800, Loic Poulain wrote:
> On Wed, 26 Oct 2022 at 13:45, haozhe chang <haozhe.chang@mediatek.com
> > wrote:
> > 
> > On Wed, 2022-10-26 at 15:28 +0800, Loic Poulain wrote:
> > > Hi Haozhe,
> > > 
> > > On Wed, 26 Oct 2022 at 03:16, <haozhe.chang@mediatek.com> wrote:
> > > > 
> > > > From: haozhe chang <haozhe.chang@mediatek.com>
> > > > 
> > > > wwan_port_fops_write inputs the SKB parameter to the TX
> > > > callback of
> > > > the WWAN device driver. However, the WWAN device (e.g., t7xx)
> > > > may
> > > > have an MTU less than the size of SKB, causing the TX buffer to
> > > > be
> > > > sliced and copied once more in the WWAN device driver.
> > > 
> > > The benefit of putting data in an skb is that it is easy to
> > > manipulate, so not sure why there is an additional copy in the
> > > first
> > > place. Isn't possible for the t7xx driver to consume the skb
> > > progressively (without intermediate copy), according to its own
> > > MTU
> > > limitation?
> > > 
> > 
> > t7xx driver needs to add metadata to the SKB head for each
> > fragment, so
> > the driver has to allocate a new buffer to copy data(skb_put_data)
> > and
> > insert metadata.
> 
> Normally, once the first part (chunk) of the skb has been consumed
> (skb_pull) and written to the device, it will become part of the
> skb headroom, which can then be used for appending (skb_push) the
> header (metadata) of the second chunks, and so... right?
> 
> Just want to avoid a bunch of unnecessary copy/alloc here.
> 
t7xx DMA can transfer multiple fragments at once; if done as
recommended, the DMA performance will be inhibited.
> Regards,
> Loic
Loic Poulain Oct. 29, 2022, 8:04 a.m. UTC | #5
On Thu, 27 Oct 2022 at 03:19, haozhe chang <haozhe.chang@mediatek.com> wrote:
>
> On Wed, 2022-10-26 at 22:27 +0800, Loic Poulain wrote:
> > On Wed, 26 Oct 2022 at 13:45, haozhe chang <haozhe.chang@mediatek.com
> > > wrote:
> > >
> > > On Wed, 2022-10-26 at 15:28 +0800, Loic Poulain wrote:
> > > > Hi Haozhe,
> > > >
> > > > On Wed, 26 Oct 2022 at 03:16, <haozhe.chang@mediatek.com> wrote:
> > > > >
> > > > > From: haozhe chang <haozhe.chang@mediatek.com>
> > > > >
> > > > > wwan_port_fops_write inputs the SKB parameter to the TX
> > > > > callback of
> > > > > the WWAN device driver. However, the WWAN device (e.g., t7xx)
> > > > > may
> > > > > have an MTU less than the size of SKB, causing the TX buffer to
> > > > > be
> > > > > sliced and copied once more in the WWAN device driver.
> > > >
> > > > The benefit of putting data in an skb is that it is easy to
> > > > manipulate, so not sure why there is an additional copy in the
> > > > first
> > > > place. Isn't possible for the t7xx driver to consume the skb
> > > > progressively (without intermediate copy), according to its own
> > > > MTU
> > > > limitation?
> > > >
> > >
> > > t7xx driver needs to add metadata to the SKB head for each
> > > fragment, so
> > > the driver has to allocate a new buffer to copy data(skb_put_data)
> > > and
> > > insert metadata.
> >
> > Normally, once the first part (chunk) of the skb has been consumed
> > (skb_pull) and written to the device, it will become part of the
> > skb headroom, which can then be used for appending (skb_push) the
> > header (metadata) of the second chunks, and so... right?
> >
> > Just want to avoid a bunch of unnecessary copy/alloc here.
> >
> t7xx DMA can transfer multiple fragments at once, if done as
> recommended, the DMA performance will be inhibited.

OK, so the skb fragmentation is valid in the t7xx case, but the way of
doing it is kind of specific to t7xx. Maybe a more acceptable solution
for a generic fragmentation feature would be to keep the extra
fragments part of the 'main' skb, using skb chaining. That would allow
the fragments to stay linked to a specific user transfer. So if
fragmentation is enabled for a given driver, the core only fills the
initial skb up to the MTU size and appends additional skbs as
fragments. You can look at mhi_net_skb_agg() for an skb chaining
example (a rough sketch follows below).

Regards,
Loic
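
For illustration, frag_list-based skb chaining in the spirit of
mhi_net_skb_agg() looks roughly like the sketch below. This is not the
mhi_net code verbatim (that helper keeps a tail pointer instead of
walking the list), and example_skb_chain is a hypothetical name.

#include <linux/skbuff.h>

/* Chain an extra fragment onto the head skb via frag_list so the whole
 * user transfer stays one logical buffer handed to the driver.
 */
static void example_skb_chain(struct sk_buff *head, struct sk_buff *frag)
{
	struct skb_shared_info *shinfo = skb_shinfo(head);

	if (!shinfo->frag_list) {
		/* First extra fragment hangs directly off the head skb */
		shinfo->frag_list = frag;
	} else {
		struct sk_buff *tail = shinfo->frag_list;

		while (tail->next)
			tail = tail->next;
		tail->next = frag;
	}

	/* Keep the head skb's length accounting consistent */
	head->len      += frag->len;
	head->data_len += frag->len;
	head->truesize += frag->truesize;
}

With something like this, the core could fill the head skb up to the
chunk length and chain the remaining chunks, so the driver still
receives the complete transfer in one call.
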
Haozhe Chang (常浩哲) Nov. 2, 2022, 5:27 a.m. UTC | #6
On Sat, 2022-10-29 at 10:04 +0200, Loic Poulain wrote:
> On Thu, 27 Oct 2022 at 03:19, haozhe chang <haozhe.chang@mediatek.com
> > wrote:
> > 
> > On Wed, 2022-10-26 at 22:27 +0800, Loic Poulain wrote:
> > > On Wed, 26 Oct 2022 at 13:45, haozhe chang <
> > > haozhe.chang@mediatek.com
> > > > wrote:
> > > > 
> > > > On Wed, 2022-10-26 at 15:28 +0800, Loic Poulain wrote:
> > > > > Hi Haozhe,
> > > > > 
> > > > > On Wed, 26 Oct 2022 at 03:16, <haozhe.chang@mediatek.com>
> > > > > wrote:
> > > > > > 
> > > > > > From: haozhe chang <haozhe.chang@mediatek.com>
> > > > > > 
> > > > > > wwan_port_fops_write inputs the SKB parameter to the TX
> > > > > > callback of
> > > > > > the WWAN device driver. However, the WWAN device (e.g.,
> > > > > > t7xx)
> > > > > > may
> > > > > > have an MTU less than the size of SKB, causing the TX
> > > > > > buffer to
> > > > > > be
> > > > > > sliced and copied once more in the WWAN device driver.
> > > > > 
> > > > > The benefit of putting data in an skb is that it is easy to
> > > > > manipulate, so not sure why there is an additional copy in
> > > > > the
> > > > > first
> > > > > place. Isn't possible for the t7xx driver to consume the skb
> > > > > progressively (without intermediate copy), according to its
> > > > > own
> > > > > MTU
> > > > > limitation?
> > > > > 
> > > > 
> > > > t7xx driver needs to add metadata to the SKB head for each
> > > > fragment, so
> > > > the driver has to allocate a new buffer to copy
> > > > data(skb_put_data)
> > > > and
> > > > insert metadata.
> > > 
> > > Normally, once the first part (chunk) of the skb has been
> > > consumed
> > > (skb_pull) and written to the device, it will become part of the
> > > skb headroom, which can then be used for appending (skb_push) the
> > > header (metadata) of the second chunks, and so... right?
> > > 
> > > Just want to avoid a bunch of unnecessary copy/alloc here.
> > > 
> > 
> > t7xx DMA can transfer multiple fragments at once, if done as
> > recommended, the DMA performance will be inhibited.
> 
> OK, so the skb fragmentation is valid in t7xx case, but the way of
> doing it is kind of specific to t7xx. Maybe a more acceptable
> solution
> for a generic fragmentation feature would be to keep the extra
> fragments part of the 'main' skb, using skb chaining. That would
> allow
> the fragments to stay linked to a specific user transfer. So if
> fragmentation is enabled for a given driver, core only fills the
> initial skb with MTU size, and appends additional skb as fragments,
> you can look at mhi_net_skb_agg() for skb chaining example.
> 
OK, it is a good suggestion; I will implement it in patch v2. Also, any
advice on the reserved headroom?
> Regards,
> Loic

Patch

diff --git a/drivers/net/wwan/t7xx/t7xx_port_wwan.c b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
index 33931bfd78fd..5e8589582121 100644
--- a/drivers/net/wwan/t7xx/t7xx_port_wwan.c
+++ b/drivers/net/wwan/t7xx/t7xx_port_wwan.c
@@ -54,13 +54,12 @@  static void t7xx_port_ctrl_stop(struct wwan_port *port)
 static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
 {
 	struct t7xx_port *port_private = wwan_port_get_drvdata(port);
-	size_t len, offset, chunk_len = 0, txq_mtu = CLDMA_MTU;
 	const struct t7xx_port_conf *port_conf;
 	struct t7xx_fsm_ctl *ctl;
 	enum md_state md_state;
+	int ret;
 
-	len = skb->len;
-	if (!len || !port_private->chan_enable)
+	if (!port_private->chan_enable)
 		return -EINVAL;
 
 	port_conf = port_private->port_conf;
@@ -72,33 +71,33 @@  static int t7xx_port_ctrl_tx(struct wwan_port *port, struct sk_buff *skb)
 		return -ENODEV;
 	}
 
-	for (offset = 0; offset < len; offset += chunk_len) {
-		struct sk_buff *skb_ccci;
-		int ret;
-
-		chunk_len = min(len - offset, txq_mtu - sizeof(struct ccci_header));
-		skb_ccci = t7xx_port_alloc_skb(chunk_len);
-		if (!skb_ccci)
-			return -ENOMEM;
-
-		skb_put_data(skb_ccci, skb->data + offset, chunk_len);
-		ret = t7xx_port_send_skb(port_private, skb_ccci, 0, 0);
-		if (ret) {
-			dev_kfree_skb_any(skb_ccci);
-			dev_err(port_private->dev, "Write error on %s port, %d\n",
-				port_conf->name, ret);
-			return ret;
-		}
+	ret = t7xx_port_send_skb(port_private, skb, 0, 0);
+	if (ret) {
+		dev_err(port_private->dev, "Write error on %s port, %d\n",
+			port_conf->name, ret);
+		return ret;
 	}
-
 	dev_kfree_skb(skb);
+
 	return 0;
 }
 
+static size_t t7xx_port_get_tx_rsvd_headroom(struct wwan_port *port)
+{
+	return sizeof(struct ccci_header);
+}
+
+static size_t t7xx_port_get_tx_chunk_len(struct wwan_port *port)
+{
+	return CLDMA_MTU - sizeof(struct ccci_header);
+}
+
 static const struct wwan_port_ops wwan_ops = {
 	.start = t7xx_port_ctrl_start,
 	.stop = t7xx_port_ctrl_stop,
 	.tx = t7xx_port_ctrl_tx,
+	.get_tx_rsvd_headroom = t7xx_port_get_tx_rsvd_headroom,
+	.get_tx_chunk_len = t7xx_port_get_tx_chunk_len,
 };
 
 static int t7xx_port_wwan_init(struct t7xx_port *port)
diff --git a/drivers/net/wwan/wwan_core.c b/drivers/net/wwan/wwan_core.c
index 62e9f7d6c9fe..366d324f7132 100644
--- a/drivers/net/wwan/wwan_core.c
+++ b/drivers/net/wwan/wwan_core.c
@@ -20,7 +20,7 @@ 
 #include <uapi/linux/wwan.h>
 
 /* Maximum number of minors in use */
-#define WWAN_MAX_MINORS		(1 << MINORBITS)
+#define WWAN_MAX_MINORS		BIT(MINORBITS)
 
 static DEFINE_MUTEX(wwan_register_lock); /* WWAN device create|remove lock */
 static DEFINE_IDA(minors); /* minors for WWAN port chardevs */
@@ -67,6 +67,8 @@  struct wwan_device {
  * @rxq: Buffer inbound queue
  * @waitqueue: The waitqueue for port fops (read/write/poll)
  * @data_lock: Port specific data access serialization
+ * @rsvd_headroom_len: SKB reserved headroom size
+ * @chunk_len: Chunk len to split packet
  * @at_data: AT port specific data
  */
 struct wwan_port {
@@ -79,6 +81,8 @@  struct wwan_port {
 	struct sk_buff_head rxq;
 	wait_queue_head_t waitqueue;
 	struct mutex data_lock;	/* Port specific data access serialization */
+	size_t rsvd_headroom_len;
+	size_t chunk_len;
 	union {
 		struct {
 			struct ktermios termios;
@@ -550,8 +554,13 @@  static int wwan_port_op_start(struct wwan_port *port)
 	}
 
 	/* If port is already started, don't start again */
-	if (!port->start_count)
+	if (!port->start_count) {
 		ret = port->ops->start(port);
+		if (port->ops->get_tx_chunk_len)
+			port->chunk_len = port->ops->get_tx_chunk_len(port);
+		if (port->ops->get_tx_rsvd_headroom)
+			port->rsvd_headroom_len = port->ops->get_tx_rsvd_headroom(port);
+	}
 
 	if (!ret)
 		port->start_count++;
@@ -698,6 +707,7 @@  static ssize_t wwan_port_fops_read(struct file *filp, char __user *buf,
 static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
 				    size_t count, loff_t *offp)
 {
+	size_t len, chunk_len, offset, allowed_chunk_len;
 	struct wwan_port *port = filp->private_data;
 	struct sk_buff *skb;
 	int ret;
@@ -706,19 +716,28 @@  static ssize_t wwan_port_fops_write(struct file *filp, const char __user *buf,
 	if (ret)
 		return ret;
 
-	skb = alloc_skb(count, GFP_KERNEL);
-	if (!skb)
-		return -ENOMEM;
-
-	if (copy_from_user(skb_put(skb, count), buf, count)) {
-		kfree_skb(skb);
-		return -EFAULT;
-	}
+	allowed_chunk_len = port->chunk_len ? port->chunk_len : count;
+	for (offset = 0; offset < count; offset += chunk_len) {
+		chunk_len = min(count - offset, allowed_chunk_len);
+		len = chunk_len + port->rsvd_headroom_len;
+		skb = alloc_skb(len, GFP_KERNEL);
+		if (!skb)
+			return offset ? offset : -ENOMEM;
+
+		skb_reserve(skb, port->rsvd_headroom_len);
+		if (copy_from_user(skb_put(skb, chunk_len), buf + offset, chunk_len)) {
+			kfree_skb(skb);
+			return offset ? offset : -EFAULT;
+		}
 
-	ret = wwan_port_op_tx(port, skb, !!(filp->f_flags & O_NONBLOCK));
-	if (ret) {
+		ret = wwan_port_op_tx(port, skb, !!(filp->f_flags & O_NONBLOCK));
+		if (!ret)
+			continue;
 		kfree_skb(skb);
-		return ret;
+		if (ret < 0)
+			return offset ? offset : ret;
+		if (ret > 0 && ret != chunk_len)
+			return offset + ret;
 	}
 
 	return count;
diff --git a/include/linux/wwan.h b/include/linux/wwan.h
index 5ce2acf444fb..cf58b3479cb2 100644
--- a/include/linux/wwan.h
+++ b/include/linux/wwan.h
@@ -46,6 +46,8 @@  struct wwan_port;
  * @tx: Non-blocking routine that sends WWAN port protocol data to the device.
  * @tx_blocking: Optional blocking routine that sends WWAN port protocol data
  *               to the device.
+ * @get_tx_rsvd_headroom: Optional routine that sets reserve headroom of skb.
+ * @get_tx_chunk_len: Optional routine that sets chunk len to split.
  * @tx_poll: Optional routine that sets additional TX poll flags.
  *
  * The wwan_port_ops structure contains a list of low-level operations
@@ -58,6 +60,8 @@  struct wwan_port_ops {
 
 	/* Optional operations */
 	int (*tx_blocking)(struct wwan_port *port, struct sk_buff *skb);
+	size_t (*get_tx_rsvd_headroom)(struct wwan_port *port);
+	size_t (*get_tx_chunk_len)(struct wwan_port *port);
 	__poll_t (*tx_poll)(struct wwan_port *port, struct file *filp,
 			    poll_table *wait);
 };
@@ -112,7 +116,6 @@  void wwan_port_rx(struct wwan_port *port, struct sk_buff *skb);
  */
 void wwan_port_txoff(struct wwan_port *port);
 
-
 /**
  * wwan_port_txon - Restart TX on WWAN port
  * @port: WWAN port for which TX must be restarted