--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -97,8 +97,7 @@ static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma
if (!req->skb)
return -ENOMEM;
 
- req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data,
- skb_data_area_size(req->skb), DMA_FROM_DEVICE);
+ req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
dev_kfree_skb_any(req->skb);
req->skb = NULL;
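Note: size here is presumably the length req->skb was just allocated with, so the driver can map exactly that many bytes instead of re-deriving a length from the skb. For reference, the helper being removed measured the whole data area; paraphrased from its former definition in include/linux/skbuff.h:

static inline unsigned int skb_data_area_size(struct sk_buff *skb)
{
	/* whole data area, from the current data pointer to the end */
	return skb_end_pointer(skb) - skb->data;
}

Because skb allocations get rounded up (SKB_DATA_ALIGN() plus the kmalloc size classes), that value can exceed the requested size, so the old code could map more bytes than the device is ever expected to write.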
@@ -154,7 +153,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
if (req->mapped_buff) {
dma_unmap_single(md_ctrl->dev, req->mapped_buff,
- skb_data_area_size(skb), DMA_FROM_DEVICE);
+ queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
req->mapped_buff = 0;
}
 
@@ -376,7 +375,7 @@ static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
if (req_cur->mapped_buff && req_cur->skb) {
dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
- skb_data_area_size(req_cur->skb), tx_rx);
+ ring->pkt_size, tx_rx);
req_cur->mapped_buff = 0;
}
 
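Note: once the map side uses the requested size, the two unmap sites above can no longer derive the length from the skb, so they fall back on the ring's fixed buffer size: every RX buffer on a CLDMA ring is allocated and mapped at pkt_size. The DMA API requires the unmap length to match the map length; a minimal illustrative sketch of that invariant (not driver code, the struct here is invented):

#include <linux/dma-mapping.h>

struct rx_ring {		/* hypothetical, simplified */
	size_t pkt_size;	/* every buffer on the ring uses this length */
};

static dma_addr_t rx_map(struct device *dev, struct rx_ring *ring, void *buf)
{
	/* One length recorded per ring, used for every mapping... */
	return dma_map_single(dev, buf, ring->pkt_size, DMA_FROM_DEVICE);
}

static void rx_unmap(struct device *dev, struct rx_ring *ring, dma_addr_t addr)
{
	/* ...and the exact same length at unmap time. */
	dma_unmap_single(dev, addr, ring->pkt_size, DMA_FROM_DEVICE);
}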
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -151,14 +151,12 @@ static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
{
dma_addr_t data_bus_addr;
struct sk_buff *skb;
- size_t data_len;
 
skb = __dev_alloc_skb(size, GFP_KERNEL);
if (!skb)
return false;
 
- data_len = skb_data_area_size(skb);
- data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, data_len, DMA_FROM_DEVICE);
+ data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE);
if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
dev_kfree_skb_any(skb);
@@ -167,7 +165,7 @@ static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
 
cur_skb->skb = skb;
cur_skb->data_bus_addr = data_bus_addr;
- cur_skb->data_len = data_len;
+ cur_skb->data_len = size;
 
return true;
}
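Note: the DPMAIF path keeps a per-buffer length field, but it now records the requested size rather than a value recomputed from the skb, so a later release along the lines of the sketch below (hypothetical call site, field names taken from the hunk) unmaps exactly what was mapped:

	/* Hypothetical release path: data_len now equals the mapped length. */
	dma_unmap_single(dpmaif_ctrl->dev, cur_skb->data_bus_addr,
			 cur_skb->data_len, DMA_FROM_DEVICE);
	dev_kfree_skb_any(cur_skb->skb);
	cur_skb->skb = NULL;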