@@ -83,6 +83,7 @@ struct page_pool;
#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8)
#define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
#define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
+#define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
#define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64)
#define MLX5E_SHAMPO_WQ_RESRV_SIZE (64 * 1024)
#define MLX5E_SHAMPO_WQ_BASE_RESRV_SIZE (4096)
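
The new MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE is just the log2 of the existing MLX5E_SHAMPO_WQ_HEADER_PER_PAGE: a header entry is at most 2^9 = 512 bytes, so a page holds PAGE_SIZE >> 9 of them (8 on 4K pages) and the shift count is PAGE_SHIFT - 9 = 3. A minimal standalone sketch of the identity the two macros must keep, assuming PAGE_SHIFT == 12 (the MLX5E_* names are from the patch, the harness around them is hypothetical):

    #include <assert.h>

    #define PAGE_SHIFT 12                 /* assumption: 4K pages */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)
    #define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9)
    #define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)
    #define MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE (PAGE_SHIFT - MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE)

    int main(void)
    {
            /* 4096 / 512 = 8 headers per page, log2(8) = 3 */
            assert(MLX5E_SHAMPO_WQ_HEADER_PER_PAGE ==
                   (1UL << MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE));
            return 0;
    }

The BUILD_BUG_ON added further down guards the related invariant in-tree: the shift only makes sense while a header entry is no larger than a page.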
@@ -624,9 +625,7 @@ struct mlx5e_dma_info {
struct mlx5e_shampo_hd {
u32 mkey;
- struct mlx5e_dma_info *info;
struct mlx5e_frag_page *pages;
- u16 curr_page_index;
u32 hd_per_wq;
u16 hd_per_wqe;
u16 pages_per_wq;
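
With the lookup helpers added below, both the owning page and the DMA address are recomputed from header_index on demand, so the per-header mlx5e_dma_info array and the incrementally tracked curr_page_index carry no information that the index does not already encode. As a rough worked figure (assuming a 64-bit build where mlx5e_dma_info is a dma_addr_t plus a pointer, 16 bytes, and a hypothetical hd_per_wq of 1024), dropping the array saves 1024 * 16 B = 16 KiB of kvzalloc'd memory per RQ.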
@@ -350,19 +350,15 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
shampo->bitmap = bitmap_zalloc_node(shampo->hd_per_wq, GFP_KERNEL,
node);
- shampo->info = kvzalloc_node(array_size(shampo->hd_per_wq,
- sizeof(*shampo->info)),
- GFP_KERNEL, node);
shampo->pages = kvzalloc_node(array_size(shampo->hd_per_wq,
sizeof(*shampo->pages)),
GFP_KERNEL, node);
- if (!shampo->bitmap || !shampo->info || !shampo->pages)
+ if (!shampo->bitmap || !shampo->pages)
goto err_nomem;
return 0;
err_nomem:
- kvfree(shampo->info);
kvfree(shampo->bitmap);
kvfree(shampo->pages);
@@ -372,7 +368,6 @@ static int mlx5e_rq_shampo_hd_info_alloc(struct mlx5e_rq *rq, int node)
static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
{
kvfree(rq->mpwqe.shampo->bitmap);
- kvfree(rq->mpwqe.shampo->info);
kvfree(rq->mpwqe.shampo->pages);
}
@@ -643,6 +643,21 @@ static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
}
+static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index)
+{
+ BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);
+
+ return &rq->mpwqe.shampo->pages[header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE];
+}
+
+static u64 mlx5e_shampo_hd_offset(int header_index)
+{
+ return (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
+ MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
+}
+
+static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index);
+
static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
u16 ksm_entries, u16 index)
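
Together the two helpers decompose header_index into a (page, offset) pair: the high bits index into shampo->pages and the low bits (three of them on 4K pages) select one of the eight 512-byte header slots within that page. A standalone sketch of the same arithmetic, assuming 4K pages, with the constants inlined and the function names hypothetical:

    #include <assert.h>

    #define LOG_ENTRY_SIZE   9  /* MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE */
    #define LOG_HDR_PER_PAGE 3  /* MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE, 4K pages */
    #define HDR_PER_PAGE     (1 << LOG_HDR_PER_PAGE)

    static int hd_page(int header_index)
    {
            return header_index >> LOG_HDR_PER_PAGE;       /* which frag page */
    }

    static unsigned long hd_offset(int header_index)
    {
            return (header_index & (HDR_PER_PAGE - 1)) << LOG_ENTRY_SIZE;
    }

    int main(void)
    {
            assert(hd_page(13) == 1 && hd_offset(13) == 0xA00); /* slot 5, page 1 */
            assert(hd_offset(8) == 0);                     /* first slot of page 1 */
            assert(hd_page(7) == 0 && hd_offset(7) == 7 * 512);
            return 0;
    }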
@@ -650,9 +665,6 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
u16 pi, header_offset, err, wqe_bbs;
u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
- u16 page_index = shampo->curr_page_index;
- struct mlx5e_frag_page *frag_page = NULL;
- struct mlx5e_dma_info *dma_info;
struct mlx5e_umr_wqe *umr_wqe;
int headroom, i;
u64 addr = 0;
@@ -665,29 +677,20 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1));
for (i = 0; i < ksm_entries; i++, index++) {
- dma_info = &shampo->info[index];
- header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
- MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
- if (!(header_offset & (PAGE_SIZE - 1))) {
- frag_page = &shampo->pages[page_index];
- page_index = (page_index + 1) & (shampo->pages_per_wq - 1);
+ header_offset = mlx5e_shampo_hd_offset(index);
+ if (!header_offset) {
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
err = mlx5e_page_alloc_fragmented(rq, frag_page);
if (unlikely(err))
goto err_unmap;
addr = page_pool_get_dma_addr(frag_page->page);
-
- dma_info->addr = addr;
- dma_info->frag_page = frag_page;
- } else {
- dma_info->addr = addr + header_offset;
- dma_info->frag_page = frag_page;
}
umr_wqe->inline_ksms[i] = (struct mlx5_ksm) {
.key = cpu_to_be32(lkey),
- .va = cpu_to_be64(dma_info->addr + headroom),
+ .va = cpu_to_be64(addr + header_offset + headroom),
};
}
@@ -698,20 +701,22 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
};
shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1);
- shampo->curr_page_index = page_index;
sq->pc += wqe_bbs;
sq->doorbell_cseg = &umr_wqe->ctrl;
return 0;
err_unmap:
while (--i >= 0) {
- dma_info = &shampo->info[--index];
- if (!(i & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1))) {
- dma_info->addr = ALIGN_DOWN(dma_info->addr, PAGE_SIZE);
- mlx5e_page_release_fragmented(rq, dma_info->frag_page);
+ --index;
+ header_offset = mlx5e_shampo_hd_offset(index);
+ if (!header_offset) {
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
+
+ mlx5e_page_release_fragmented(rq, frag_page);
}
}
+
rq->stats->buff_alloc_err++;
return err;
}
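
The rewritten loop above allocates a page only when mlx5e_shampo_hd_offset(index) is zero and then reuses addr for the remaining headers on that page, so the error path walks the same indices backwards and releases a page each time it crosses an offset-zero header; the >= 0 loop bound matters because the first entry of a batch typically sits at offset zero and owns a freshly allocated page, and stopping at 1 would leak it. A standalone sketch of this allocate-on-page-boundary pattern, where alloc_page_stub()/release_page_stub() are hypothetical stand-ins for mlx5e_page_alloc_fragmented()/mlx5e_page_release_fragmented():

    #include <stdio.h>

    #define HDR_PER_PAGE 8

    static int alloc_page_stub(int page)
    {
            return page < 2 ? 0 : -1;      /* simulate failure on the 3rd page */
    }

    static void release_page_stub(int page)
    {
            printf("release page %d\n", page);
    }

    static int build_umr(int ksm_entries, int index)
    {
            int i, err = 0;

            for (i = 0; i < ksm_entries; i++, index++) {
                    if (!(index & (HDR_PER_PAGE - 1))) {   /* offset 0: new page */
                            err = alloc_page_stub(index / HDR_PER_PAGE);
                            if (err)
                                    goto err_unmap;
                    }
                    /* ksm[i].va = page DMA address + offset + headroom */
            }
            return 0;

    err_unmap:
            while (--i >= 0) {     /* >= 0: the first entry must be undone too */
                    --index;
                    if (!(index & (HDR_PER_PAGE - 1)))
                            release_page_stub(index / HDR_PER_PAGE);
            }
            return err;
    }

    int main(void)
    {
            /* fails on page 2; the unwind releases page 1, then page 0 */
            return build_umr(24, 0) == -1 ? 0 : 1;
    }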
@@ -844,13 +849,11 @@ static void
mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
{
struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
- u64 addr = shampo->info[header_index].addr;
if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
- struct mlx5e_dma_info *dma_info = &shampo->info[header_index];
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
- dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE);
- mlx5e_page_release_fragmented(rq, dma_info->frag_page);
+ mlx5e_page_release_fragmented(rq, frag_page);
}
clear_bit(header_index, shampo->bitmap);
}
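
mlx5e_free_rx_shampo_hd_entry() applies the mirror-image condition: the page is released only when the last header slot on it is freed, i.e. when (header_index + 1) is a multiple of MLX5E_SHAMPO_WQ_HEADER_PER_PAGE. On 4K pages that fires for header_index 7, 15, 23, and so on; freeing header 15, for example, releases pages[15 >> 3] = pages[1].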
@@ -1204,10 +1207,10 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
{
- struct mlx5e_dma_info *last_head = &rq->mpwqe.shampo->info[header_index];
- u16 head_offset = (last_head->addr & (PAGE_SIZE - 1)) + rq->buff.headroom;
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
+ u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom;
- return page_address(last_head->frag_page->page) + head_offset;
+ return page_address(frag_page->page) + head_offset;
}
static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
@@ -2178,29 +2181,30 @@ static struct sk_buff *
mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct mlx5_cqe64 *cqe, u16 header_index)
{
- struct mlx5e_dma_info *head = &rq->mpwqe.shampo->info[header_index];
- u16 head_offset = head->addr & (PAGE_SIZE - 1);
+ struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
+ dma_addr_t page_dma_addr = page_pool_get_dma_addr(frag_page->page);
+ u16 head_offset = mlx5e_shampo_hd_offset(header_index);
+ dma_addr_t dma_addr = page_dma_addr + head_offset;
u16 head_size = cqe->shampo.header_size;
u16 rx_headroom = rq->buff.headroom;
struct sk_buff *skb = NULL;
void *hdr, *data;
u32 frag_size;
- hdr = page_address(head->frag_page->page) + head_offset;
+ hdr = page_address(frag_page->page) + head_offset;
data = hdr + rx_headroom;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
/* build SKB around header */
- dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir);
+ dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir);
net_prefetchw(hdr);
net_prefetch(data);
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
-
if (unlikely(!skb))
return NULL;
- head->frag_page->frags++;
+ frag_page->frags++;
} else {
/* allocate SKB and copy header for large header */
rq->stats->gro_large_hds++;
@@ -2212,7 +2216,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
}
net_prefetchw(skb->data);
- mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr,
+ mlx5e_copy_skb_header(rq, skb, frag_page->page, dma_addr,
head_offset + rx_headroom,
rx_headroom, head_size);
/* skb linear part was allocated with headlen and aligned to long */
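
In the skb path, everything that used to be cached in shampo->info is likewise derived on the fly: hdr comes from page_address(frag_page->page) plus the computed offset, and the DMA address used for the sync and the header copy is the page's page_pool DMA base plus the same offset. The split between the two branches is unchanged: when MLX5_SKB_FRAG_SZ(rx_headroom + head_size) still fits in the 512-byte header slot, the skb is built linearly around the header in place and frag_page->frags++ takes a fragment reference on the page; larger headers fall back to allocating an skb and copying via mlx5e_copy_skb_header().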