Message ID | 20221123205219.31748-4-anirudh.venkataramanan@intel.com (mailing list archive) |
---|---
State | Accepted |
Commit | c191445874bba16a636a88fc4afc7c3ab09228fd |
Delegated to: | Netdev Maintainers |
Series | Remove uses of kmap_atomic()
On Wednesday, 23 November 2022 at 21:52:16 CET, Anirudh Venkataramanan wrote:
> Pages for Rx buffers are allocated in cas_page_alloc() using either
> GFP_ATOMIC or GFP_KERNEL. Memory allocated with GFP_KERNEL/GFP_ATOMIC can't
> come from highmem and so there's no need to kmap() them. Just use
> page_address() instead. This makes the variable 'addr' unnecessary, so
> remove it too.
>
> Note that kmap_atomic() disables preemption and page-fault processing,
> but page_address() doesn't. When removing uses of kmap_atomic(), one has to
> check if the code being executed between the map/unmap implicitly depends
> on page-faults and/or preemption being disabled. If yes, then code to
> disable page-faults and/or preemption should also be added for functional
> correctness. That however doesn't appear to be the case here, so just
> page_address() is used.
>
> I don't have hardware, so this change has only been compile tested.
>
> Cc: Ira Weiny <ira.weiny@intel.com>
> Cc: Fabio M. De Francesco <fmdefrancesco@gmail.com>
> Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
> ---
> v1 -> v2: Update commit message
> ---
>  drivers/net/ethernet/sun/cassini.c | 34 ++++++++++--------------------
>  1 file changed, 11 insertions(+), 23 deletions(-)

Reviewed-by: Fabio M. De Francesco <fmdefrancesco@gmail.com>

Thanks,

Fabio
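For context on the caveat quoted above, here is a minimal C sketch (not taken from the driver) of what a kmap_atomic() removal would look like if the intervening code *did* rely on the implicit preemption/page-fault disabling. The do_work_nonpreemptible() helper is hypothetical and only stands in for whatever the mapped buffer is used for; preempt_disable()/pagefault_disable() are the real kernel primitives one would add in that case.

```c
#include <linux/highmem.h>	/* page_address() */
#include <linux/preempt.h>	/* preempt_disable()/preempt_enable() */
#include <linux/uaccess.h>	/* pagefault_disable()/pagefault_enable() */

/* Hypothetical consumer that must not be preempted or take page faults. */
static void do_work_nonpreemptible(void *addr)
{
	/* ... operate on the mapped buffer ... */
}

static void consume_lowmem_page(struct page *pg)
{
	/* Lowmem page (GFP_KERNEL/GFP_ATOMIC), so it is always mapped. */
	void *addr = page_address(pg);

	/* Re-create kmap_atomic()'s side effects explicitly. */
	preempt_disable();
	pagefault_disable();
	do_work_nonpreemptible(addr);
	pagefault_enable();
	preempt_enable();
}
```

In the cassini Rx path no such dependency exists, which is why the patch below uses a bare page_address().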
```diff
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index 0aca193..2f66cfc 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -1915,7 +1915,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
         int off, swivel = RX_SWIVEL_OFF_VAL;
         struct cas_page *page;
         struct sk_buff *skb;
-        void *addr, *crcaddr;
+        void *crcaddr;
         __sum16 csum;
         char *p;
 
@@ -1936,7 +1936,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
         skb_reserve(skb, swivel);
 
         p = skb->data;
-        addr = crcaddr = NULL;
+        crcaddr = NULL;
         if (hlen) { /* always copy header pages */
                 i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
                 page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
@@ -1948,12 +1948,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                         i += cp->crc_size;
                 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
                                         i, DMA_FROM_DEVICE);
-                addr = cas_page_map(page->buffer);
-                memcpy(p, addr + off, i);
+                memcpy(p, page_address(page->buffer) + off, i);
                 dma_sync_single_for_device(&cp->pdev->dev,
                                            page->dma_addr + off, i,
                                            DMA_FROM_DEVICE);
-                cas_page_unmap(addr);
                 RX_USED_ADD(page, 0x100);
                 p += hlen;
                 swivel = 0;
@@ -1984,12 +1982,11 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                         /* make sure we always copy a header */
                         swivel = 0;
                         if (p == (char *) skb->data) { /* not split */
-                                addr = cas_page_map(page->buffer);
-                                memcpy(p, addr + off, RX_COPY_MIN);
+                                memcpy(p, page_address(page->buffer) + off,
+                                       RX_COPY_MIN);
                                 dma_sync_single_for_device(&cp->pdev->dev,
                                                            page->dma_addr + off, i,
                                                            DMA_FROM_DEVICE);
-                                cas_page_unmap(addr);
                                 off += RX_COPY_MIN;
                                 swivel = RX_COPY_MIN;
                                 RX_USED_ADD(page, cp->mtu_stride);
@@ -2036,10 +2033,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                         RX_USED_ADD(page, hlen + cp->crc_size);
                 }
 
-                if (cp->crc_size) {
-                        addr = cas_page_map(page->buffer);
-                        crcaddr = addr + off + hlen;
-                }
+                if (cp->crc_size)
+                        crcaddr = page_address(page->buffer) + off + hlen;
 
         } else {
                 /* copying packet */
@@ -2061,12 +2056,10 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                         i += cp->crc_size;
                 dma_sync_single_for_cpu(&cp->pdev->dev, page->dma_addr + off,
                                         i, DMA_FROM_DEVICE);
-                addr = cas_page_map(page->buffer);
-                memcpy(p, addr + off, i);
+                memcpy(p, page_address(page->buffer) + off, i);
                 dma_sync_single_for_device(&cp->pdev->dev,
                                            page->dma_addr + off, i,
                                            DMA_FROM_DEVICE);
-                cas_page_unmap(addr);
                 if (p == (char *) skb->data) /* not split */
                         RX_USED_ADD(page, cp->mtu_stride);
                 else
@@ -2081,20 +2074,17 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                                                page->dma_addr,
                                                dlen + cp->crc_size,
                                                DMA_FROM_DEVICE);
-                        addr = cas_page_map(page->buffer);
-                        memcpy(p, addr, dlen + cp->crc_size);
+                        memcpy(p, page_address(page->buffer), dlen + cp->crc_size);
                         dma_sync_single_for_device(&cp->pdev->dev,
                                                    page->dma_addr,
                                                    dlen + cp->crc_size,
                                                    DMA_FROM_DEVICE);
-                        cas_page_unmap(addr);
                         RX_USED_ADD(page, dlen + cp->crc_size);
                 }
 end_copy_pkt:
-                if (cp->crc_size) {
-                        addr = NULL;
+                if (cp->crc_size)
                         crcaddr = skb->data + alloclen;
-                }
+
                 skb_put(skb, alloclen);
         }
 
@@ -2103,8 +2093,6 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
                 /* checksum includes FCS. strip it out. */
                 csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
                                               csum_unfold(csum)));
-                if (addr)
-                        cas_page_unmap(addr);
         }
         skb->protocol = eth_type_trans(skb, cp->dev);
         if (skb->protocol == htons(ETH_P_IP)) {
```
Pages for Rx buffers are allocated in cas_page_alloc() using either
GFP_ATOMIC or GFP_KERNEL. Memory allocated with GFP_KERNEL/GFP_ATOMIC can't
come from highmem and so there's no need to kmap() them. Just use
page_address() instead. This makes the variable 'addr' unnecessary, so
remove it too.

Note that kmap_atomic() disables preemption and page-fault processing,
but page_address() doesn't. When removing uses of kmap_atomic(), one has to
check if the code being executed between the map/unmap implicitly depends
on page-faults and/or preemption being disabled. If yes, then code to
disable page-faults and/or preemption should also be added for functional
correctness. That however doesn't appear to be the case here, so just
page_address() is used.

I don't have hardware, so this change has only been compile tested.

Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Fabio M. De Francesco <fmdefrancesco@gmail.com>
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
---
v1 -> v2: Update commit message
---
 drivers/net/ethernet/sun/cassini.c | 34 ++++++++++--------------------
 1 file changed, 11 insertions(+), 23 deletions(-)
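As a quick illustration of the pattern the commit message describes, the sketch below shows the before/after shape of the conversion. It is not the driver's code: the rx_copy_*() helpers and their parameters are made up for this example; only the kmap_atomic() to page_address() substitution mirrors the patch.

```c
#include <linux/highmem.h>	/* kmap_atomic(), kunmap_atomic(), page_address() */
#include <linux/string.h>	/* memcpy() */

/* Before: map the page even though it can never be in highmem. */
static void rx_copy_kmap(void *dst, struct page *pg, size_t off, size_t len)
{
	void *addr = kmap_atomic(pg);	/* also disables preemption/page faults */

	memcpy(dst, addr + off, len);
	kunmap_atomic(addr);
}

/*
 * After: the page came from alloc_pages(GFP_KERNEL) or alloc_pages(GFP_ATOMIC),
 * i.e. lowmem, so it is permanently mapped and page_address() is sufficient.
 */
static void rx_copy_lowmem(void *dst, struct page *pg, size_t off, size_t len)
{
	memcpy(dst, page_address(pg) + off, len);
}
```

Because the copy itself has no dependency on preemption or page faults being disabled, dropping kmap_atomic() here changes nothing functionally; it only removes an unnecessary mapping step and the now-unused 'addr' variable.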