@@ -1658,7 +1658,7 @@ static void chcr_ktls_copy_record_in_skb(struct sk_buff *nskb,
for (i = 0; i < record->num_frags; i++) {
skb_shinfo(nskb)->frags[i] = record->frags[i];
/* increase the frag ref count */
- __skb_frag_ref(&skb_shinfo(nskb)->frags[i]);
+ __skb_frag_ref(&skb_shinfo(nskb)->frags[i], nskb->pp_recycle);
}
skb_shinfo(nskb)->nr_frags = record->num_frags;
@@ -1999,7 +1999,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
skb->len += hlen - swivel;
skb_frag_fill_page_desc(frag, page->buffer, off, hlen - swivel);
- __skb_frag_ref(frag);
+ __skb_frag_ref(frag, skb->pp_recycle);
/* any more data? */
if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
@@ -2023,7 +2023,7 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
frag++;
skb_frag_fill_page_desc(frag, page->buffer, 0, hlen);
- __skb_frag_ref(frag);
+ __skb_frag_ref(frag, skb->pp_recycle);
RX_USED_ADD(page, hlen + cp->crc_size);
}
@@ -716,7 +716,7 @@ static void veth_xdp_get(struct xdp_buff *xdp)
return;
for (i = 0; i < sinfo->nr_frags; i++)
- __skb_frag_ref(&sinfo->frags[i]);
+ __skb_frag_ref(&sinfo->frags[i], false);
}
static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq,
@@ -3483,15 +3483,29 @@ static inline struct page *skb_frag_page(const skb_frag_t *frag)
return netmem_to_page(frag->netmem);
}
+bool napi_pp_get_page(struct page *page);
+
+static inline void skb_page_ref(struct page *page, bool recycle)
+{
+#ifdef CONFIG_PAGE_POOL
+ if (recycle && napi_pp_get_page(page))
+ return;
+#endif
+ get_page(page);
+}
+
/**
* __skb_frag_ref - take an additional reference on a paged fragment.
* @frag: the paged fragment
+ * @recycle: skb->pp_recycle param of the parent skb. False if no parent skb.
*
- * Takes an additional reference on the paged fragment @frag.
+ * Takes an additional reference on the paged fragment @frag, using a page
+ * pool reference (pp_ref_count) when @recycle is set and the frag is backed
+ * by a page pool page, and a plain page reference otherwise.
*/
-static inline void __skb_frag_ref(skb_frag_t *frag)
+static inline void __skb_frag_ref(skb_frag_t *frag, bool recycle)
{
- get_page(skb_frag_page(frag));
+ skb_page_ref(skb_frag_page(frag), recycle);
}
/**
@@ -3503,7 +3517,7 @@ static inline void __skb_frag_ref(skb_frag_t *frag)
*/
static inline void skb_frag_ref(struct sk_buff *skb, int f)
{
- __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
+ __skb_frag_ref(&skb_shinfo(skb)->frags[f], skb->pp_recycle);
}
int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb,
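As a usage sketch (not part of the patch): a caller that copies fragments into a destination skb now passes that skb's pp_recycle flag, so page pool pages get a pp_ref_count reference instead of a plain page reference. copy_frags_to_skb() below is a hypothetical helper written against the signatures added above, following the chcr_ktls hunk at the top of this patch.

#include <linux/skbuff.h>

static void copy_frags_to_skb(struct sk_buff *nskb, const skb_frag_t *frags,
			      int nr_frags)
{
	int i;

	for (i = 0; i < nr_frags; i++) {
		skb_shinfo(nskb)->frags[i] = frags[i];
		/* skb_page_ref() picks pp_ref_count vs. get_page() based on
		 * the destination skb's pp_recycle flag
		 */
		__skb_frag_ref(&skb_shinfo(nskb)->frags[i], nskb->pp_recycle);
	}
	skb_shinfo(nskb)->nr_frags = nr_frags;
}

When the fragments have no parent skb (the veth and TLS fallback hunks), callers pass false so a plain page reference is always taken.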
@@ -1004,6 +1004,18 @@ int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb,
EXPORT_SYMBOL(skb_cow_data_for_xdp);
#if IS_ENABLED(CONFIG_PAGE_POOL)
+bool napi_pp_get_page(struct page *page)
+{
+ page = compound_head(page);
+
+ if (!is_pp_page(page))
+ return false;
+
+ page_pool_ref_page(page);
+ return true;
+}
+EXPORT_SYMBOL(napi_pp_get_page);
+
bool napi_pp_put_page(struct page *page)
{
page = compound_head(page);
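For context (not part of the patch): napi_pp_get_page() is the acquire-side mirror of napi_pp_put_page() just below it, so a reference taken through it lands in pp_ref_count and should be dropped back through the page pool path rather than with put_page(). A minimal sketch of the intended pairing, with pp_hold_and_release() as a hypothetical caller built only on the two exported helpers:

static void pp_hold_and_release(struct page *page)
{
	/* acquire: pp_ref_count for page pool pages, plain page ref otherwise */
	if (!napi_pp_get_page(page))
		get_page(page);

	/* ... use the page (DMA, hand-off to another skb, etc.) ... */

	/* release mirrors the acquire: napi_pp_put_page() returns false for
	 * non page pool pages, so fall back to put_page() in that case
	 */
	if (!napi_pp_put_page(page))
		put_page(page);
}

This is the same shape as skb_page_ref() above, which additionally gates the page pool branch on the caller's recycle flag.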
@@ -1032,37 +1044,6 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data)
return napi_pp_put_page(virt_to_page(data));
}
-/**
- * skb_pp_frag_ref() - Increase fragment references of a page pool aware skb
- * @skb: page pool aware skb
- *
- * Increase the fragment reference count (pp_ref_count) of a skb. This is
- * intended to gain fragment references only for page pool aware skbs,
- * i.e. when skb->pp_recycle is true, and not for fragments in a
- * non-pp-recycling skb. It has a fallback to increase references on normal
- * pages, as page pool aware skbs may also have normal page fragments.
- */
-static int skb_pp_frag_ref(struct sk_buff *skb)
-{
- struct skb_shared_info *shinfo;
- struct page *head_page;
- int i;
-
- if (!skb->pp_recycle)
- return -EINVAL;
-
- shinfo = skb_shinfo(skb);
-
- for (i = 0; i < shinfo->nr_frags; i++) {
- head_page = compound_head(skb_frag_page(&shinfo->frags[i]));
- if (likely(is_pp_page(head_page)))
- page_pool_ref_page(head_page);
- else
- page_ref_inc(head_page);
- }
- return 0;
-}
-
static void skb_kfree_head(void *head, unsigned int end_offset)
{
if (end_offset == SKB_SMALL_HEAD_HEADROOM)
@@ -4169,7 +4150,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen)
to++;
} else {
- __skb_frag_ref(fragfrom);
+ __skb_frag_ref(fragfrom, skb->pp_recycle);
skb_frag_page_copy(fragto, fragfrom);
skb_frag_off_copy(fragto, fragfrom);
skb_frag_size_set(fragto, todo);
@@ -4819,7 +4800,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
}
*nskb_frag = (i < 0) ? skb_head_frag_to_page_desc(frag_skb) : *frag;
- __skb_frag_ref(nskb_frag);
+ __skb_frag_ref(nskb_frag, nskb->pp_recycle);
size = skb_frag_size(nskb_frag);
if (pos < offset) {
@@ -5950,10 +5931,8 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
/* if the skb is not cloned this does nothing
* since we set nr_frags to 0.
*/
- if (skb_pp_frag_ref(from)) {
- for (i = 0; i < from_shinfo->nr_frags; i++)
- __skb_frag_ref(&from_shinfo->frags[i]);
- }
+ for (i = 0; i < from_shinfo->nr_frags; i++)
+ __skb_frag_ref(&from_shinfo->frags[i], from->pp_recycle);
to->truesize += delta;
to->len += len;
@@ -277,7 +277,7 @@ static int fill_sg_in(struct scatterlist *sg_in,
for (i = 0; remaining > 0; i++) {
skb_frag_t *frag = &record->frags[i];
- __skb_frag_ref(frag);
+ __skb_frag_ref(frag, false);
sg_set_page(sg_in + i, skb_frag_page(frag),
skb_frag_size(frag), skb_frag_off(frag));