Message ID | 20210114142321.2594697-2-liuhangbin@gmail.com (mailing list archive) |
---|---|
State | Superseded |
Delegated to: | BPF |
Series | xdp: add a new helper for dev map multicast support |
Context | Check | Description |
---|---|---|
netdev/cover_letter | success | Link |
netdev/fixes_present | success | Link |
netdev/patch_count | success | Link |
netdev/tree_selection | success | Clearly marked for bpf-next |
netdev/subject_prefix | success | Link |
netdev/cc_maintainers | warning | 9 maintainers not CCed: kpsingh@kernel.org davem@davemloft.net andrii@kernel.org yhs@fb.com hawk@kernel.org kuba@kernel.org kafai@fb.com john.fastabend@gmail.com songliubraving@fb.com |
netdev/source_inline | success | Was 0 now: 0 |
netdev/verify_signedoff | success | Link |
netdev/module_param | success | Was 0 now: 0 |
netdev/build_32bit | success | Errors and warnings before: 1 this patch: 1 |
netdev/kdoc | success | Errors and warnings before: 0 this patch: 0 |
netdev/verify_fixes | success | Link |
netdev/checkpatch | success | total: 0 errors, 0 warnings, 0 checks, 192 lines checked |
netdev/build_allmodconfig_warn | success | Errors and warnings before: 1 this patch: 1 |
netdev/header_inline | success | Link |
netdev/stable | success | Stable not CCed |
Hangbin Liu wrote:
> From: Jesper Dangaard Brouer <brouer@redhat.com>
>
> This changes the devmap XDP program support to run the program when the
> bulk queue is flushed instead of before the frame is enqueued. This has
> a couple of benefits:
>
> - It "sorts" the packets by destination devmap entry, and then runs the
>   same BPF program on all the packets in sequence. This ensures that we
>   keep the XDP program and destination device properties hot in I-cache.
>
> - It makes the multicast implementation simpler because it can just
>   enqueue packets using bq_enqueue() without having to deal with the
>   devmap program at all.
>
> The drawback is that if the devmap program drops the packet, the enqueue
> step is redundant. However, arguably this is mostly visible in a
> micro-benchmark, and with more mixed traffic the I-cache benefit should
> win out. The performance impact of just this patch is as follows:
>
> Using xdp_redirect_map (with a 2nd xdp_prog patch[1]) in sample/bpf and
> send pkts via pktgen cmd:
> ./pktgen_sample03_burst_single_flow.sh -i eno1 -d $dst_ip -m $dst_mac -t 10 -s 64
>
> There are about +/- 0.1M deviation for native testing, the performance
> improved for the base-case, but some drop back with xdp devmap prog
> attached.
>
> Version          | Test                        | Generic | Native | Native + 2nd xdp_prog
> 5.10 rc6         | xdp_redirect_map i40e->i40e | 2.0M    | 9.1M   | 8.0M
> 5.10 rc6         | xdp_redirect_map i40e->veth | 1.7M    | 11.0M  | 9.7M
> 5.10 rc6 + patch | xdp_redirect_map i40e->i40e | 2.0M    | 9.5M   | 7.5M
> 5.10 rc6 + patch | xdp_redirect_map i40e->veth | 1.7M    | 11.6M  | 9.1M
>
> [1] https://patchwork.ozlabs.org/project/netdev/patch/20201208120159.2278277-1-liuhangbin@gmail.com/
>
> Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
> Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
>
> --
> v14: no update, only rebase the code
> v13: pass in xdp_prog through __xdp_enqueue()
> v2-v12: no this patch
> ---
>  kernel/bpf/devmap.c | 115 +++++++++++++++++++++++++++-----------------
>  1 file changed, 72 insertions(+), 43 deletions(-)
>
> diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
> index f6e9c68afdd4..84fe15950e44 100644
> --- a/kernel/bpf/devmap.c
> +++ b/kernel/bpf/devmap.c
> @@ -57,6 +57,7 @@ struct xdp_dev_bulk_queue {
>  	struct list_head flush_node;
>  	struct net_device *dev;
>  	struct net_device *dev_rx;
> +	struct bpf_prog *xdp_prog;
>  	unsigned int count;
>  };
>
> @@ -327,40 +328,92 @@ bool dev_map_can_have_prog(struct bpf_map *map)
>  	return false;
>  }
>
> +static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
> +				struct xdp_frame **frames, int n,
> +				struct net_device *dev)
> +{
> +	struct xdp_txq_info txq = { .dev = dev };
> +	struct xdp_buff xdp;
> +	int i, nframes = 0;
> +
> +	for (i = 0; i < n; i++) {
> +		struct xdp_frame *xdpf = frames[i];
> +		u32 act;
> +		int err;
> +
> +		xdp_convert_frame_to_buff(xdpf, &xdp);

Hi, slightly higher level question about the design. How come we have to
bounce the xdp_frame back and forth between an xdp_buff <-> xdp_frame?
Seems a bit wasteful.

> +		xdp.txq = &txq;
> +
> +		act = bpf_prog_run_xdp(xdp_prog, &xdp);
> +		switch (act) {
> +		case XDP_PASS:
> +			err = xdp_update_frame_from_buff(&xdp, xdpf);

xdp_update_frame_from_buff will then convert it back from the xdp_buff?
struct xdp_buff {
	void *data;
	void *data_end;
	void *data_meta;
	void *data_hard_start;
	struct xdp_rxq_info *rxq;
	struct xdp_txq_info *txq;
	u32 frame_sz; /* frame size to deduce data_hard_end/reserved tailroom*/
};

struct xdp_frame {
	void *data;
	u16 len;
	u16 headroom;
	u32 metasize:8;
	u32 frame_sz:24;
	/* Lifetime of xdp_rxq_info is limited to NAPI/enqueue time,
	 * while mem info is valid on remote CPU.
	 */
	struct xdp_mem_info mem;
	struct net_device *dev_rx; /* used by cpumap */
};

It looks like we could embed xdp_buff in xdp_frame and then keep the metadata
at the end.

Because you are working performance here wdyt? <- @Jesper as well.

> +			if (unlikely(err < 0))
> +				xdp_return_frame_rx_napi(xdpf);
> +			else
> +				frames[nframes++] = xdpf;
> +			break;
> +		default:
> +			bpf_warn_invalid_xdp_action(act);
> +			fallthrough;
> +		case XDP_ABORTED:
> +			trace_xdp_exception(dev, xdp_prog, act);
> +			fallthrough;
> +		case XDP_DROP:
> +			xdp_return_frame_rx_napi(xdpf);
> +			break;
> +		}
> +	}
> +	return n - nframes; /* dropped frames count */
> +}
> +
>  static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
>  {
>  	struct net_device *dev = bq->dev;
>  	int sent = 0, drops = 0, err = 0;
> +	unsigned int cnt = bq->count;
> +	unsigned int xdp_drop;
>  	int i;
>
> -	if (unlikely(!bq->count))
> +	if (unlikely(!cnt))
>  		return;
>
> -	for (i = 0; i < bq->count; i++) {
> +	for (i = 0; i < cnt; i++) {
>  		struct xdp_frame *xdpf = bq->q[i];
>
>  		prefetch(xdpf);
>  	}
>
> -	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
> +	if (unlikely(bq->xdp_prog)) {

What's the rationale for making the above unlikely()? Seems for users it's
not unlikely. Can you measure a performance increase/decrease here? I think
it's probably fine to just let the compiler/prefetcher do its thing here. Or
I'm not reading this right, but it seems users of bq->xdp_prog would disagree
on the unlikely case?

Either way a comment might be nice to give us some insight in 6 months
why we decided this is unlikely.

> +		xdp_drop = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
> +		cnt -= xdp_drop;
> +		if (!cnt) {

If dev_map_bpf_prog_run() returned sent packets this would read better
imo.

	sent = dev_map_bpf_prog_run(...)
	if (!sent)
		goto out;

> +			sent = 0;
> +			drops = xdp_drop;
> +			goto out;
> +		}
> +	}
> +
> +	sent = dev->netdev_ops->ndo_xdp_xmit(dev, cnt, bq->q, flags);

And, sent = dev->netdev_ops->ndo_xdp_xmit(dev, sent, bq->q, flags);

>  	if (sent < 0) {
>  		err = sent;
>  		sent = 0;
>  		goto error;
>  	}
> -	drops = bq->count - sent;
> +	drops = (cnt - sent) + xdp_drop;

With the above 'sent' logic, drops will still be just drops = bq->count - sent;
move the calculation below the out label and I think you can clean up the
above as well. Did I miss something...

>  out:
>  	bq->count = 0;
>
>  	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
>  	bq->dev_rx = NULL;
> +	bq->xdp_prog = NULL;
>  	__list_del_clearprev(&bq->flush_node);
>  	return;
>  error:
>  	/* If ndo_xdp_xmit fails with an errno, no frames have been
>  	 * xmit'ed and it's our responsibility to them free all.
>  	 */
> -	for (i = 0; i < bq->count; i++) {
> +	for (i = 0; i < cnt; i++) {
>  		struct xdp_frame *xdpf = bq->q[i];

Patch looks overall good to me, but cleaning up the logic a bit seems like
a plus.

Thanks,
John
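For readers following John's question, the "bounce" between xdp_frame and
xdp_buff is essentially a handful of field copies; no packet bytes move. The
sketch below uses stand-in struct layouts paraphrased from the definitions
quoted above (they are not the kernel headers, and the real
xdp_convert_frame_to_buff()/xdp_update_frame_from_buff() helpers in
include/net/xdp.h differ in detail), but it shows the rough shape of the
conversion in both directions and why the round trip is lossless.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in layouts mirroring the fields quoted in this thread. */
struct buff_standin {
	void *data, *data_end, *data_meta, *data_hard_start;
	void *rxq, *txq;
	uint32_t frame_sz;
};

struct frame_standin {
	void *data;
	uint16_t len, headroom;
	uint32_t metasize:8, frame_sz:24;
	struct { uint32_t type, id; } mem;	/* stand-in for xdp_mem_info */
	void *dev_rx;
};

/* frame -> buff: recompute the buff pointers from the frame's offsets. */
static void frame_to_buff(const struct frame_standin *f, struct buff_standin *b)
{
	b->data_hard_start = (char *)f->data - f->headroom - sizeof(*f);
	b->data = f->data;
	b->data_end = (char *)f->data + f->len;
	b->data_meta = (char *)f->data - f->metasize;
	b->frame_sz = f->frame_sz;
}

/* buff -> frame: fold the (possibly adjusted) pointers back into offsets. */
static void buff_to_frame(const struct buff_standin *b, struct frame_standin *f)
{
	f->data = b->data;
	f->len = (uint16_t)((char *)b->data_end - (char *)b->data);
	f->headroom = (uint16_t)(((char *)b->data - (char *)b->data_hard_start) - sizeof(*f));
	f->metasize = (uint8_t)((char *)b->data - (char *)b->data_meta);
	f->frame_sz = b->frame_sz;
}

int main(void)
{
	static char page[2048];
	struct frame_standin f = {
		.data = page + 256,
		.len = 64,
		.headroom = (uint16_t)(256 - sizeof(f)),
		.frame_sz = sizeof(page),
	};
	struct buff_standin b;
	struct frame_standin f2 = f;

	printf("sizeof(frame)=%zu sizeof(buff)=%zu\n", sizeof(f), sizeof(b));
	frame_to_buff(&f, &b);
	buff_to_frame(&b, &f2);
	assert(f2.data == f.data && f2.len == f.len);	/* round trip is lossless */
	return 0;
}
```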
On Sun, Jan 17, 2021 at 02:57:02PM -0800, John Fastabend wrote:
[...]
> It looks like we could embed xdp_buff in xdp_frame and then keep the metadata
> at the end.
>
> Because you are working performance here wdyt? <- @Jesper as well.

Leave this question to Jesper.

> >
> > -	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
> > +	if (unlikely(bq->xdp_prog)) {
>
> What's the rationale for making the above unlikely()? Seems for users it's
> not unlikely. Can you measure a performance increase/decrease here? I think
> it's probably fine to just let the compiler/prefetcher do its thing here. Or
> I'm not reading this right, but it seems users of bq->xdp_prog would disagree
> on the unlikely case?
>
> Either way a comment might be nice to give us some insight in 6 months
> why we decided this is unlikely.

I agree that there is no need to use unlikely() here.

> > +		xdp_drop = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
> > +		cnt -= xdp_drop;
> > +		if (!cnt) {
>
> If dev_map_bpf_prog_run() returned sent packets this would read better
> imo.
>
> 	sent = dev_map_bpf_prog_run(...)
> 	if (!sent)
> 		goto out;
>
> > +			sent = 0;
> > +			drops = xdp_drop;
> > +			goto out;
> > +		}
> > +	}
> > +
> > +	sent = dev->netdev_ops->ndo_xdp_xmit(dev, cnt, bq->q, flags);
>
> And, sent = dev->netdev_ops->ndo_xdp_xmit(dev, sent, bq->q, flags);
>
> >  	if (sent < 0) {
> >  		err = sent;
> >  		sent = 0;
> >  		goto error;
> >  	}
> > -	drops = bq->count - sent;
> > +	drops = (cnt - sent) + xdp_drop;
>
> With the above 'sent' logic, drops will still be just drops = bq->count - sent;
> move the calculation below the out label and I think you can clean up the

If we use the 'sent' logic, we should also back up the drops value before
xmit, as the error label also needs it.

> above as well. Did I miss something...
>
> >  out:
> >  	bq->count = 0;
> >
> >  	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
> >  	bq->dev_rx = NULL;
> > +	bq->xdp_prog = NULL;
> >  	__list_del_clearprev(&bq->flush_node);
> >  	return;
> >  error:
> >  	/* If ndo_xdp_xmit fails with an errno, no frames have been
> >  	 * xmit'ed and it's our responsibility to them free all.
> >  	 */
> > -	for (i = 0; i < bq->count; i++) {
> > +	for (i = 0; i < cnt; i++) {
> >  		struct xdp_frame *xdpf = bq->q[i];

Here it will be "for (i = 0; i < cnt - drops; i++)" to free the non-xmit'ed
frames. To make the logic more clear, here is the full code:

[...]
	if (bq->xdp_prog) {
		sent = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
		if (!sent)
			goto out;
	}

	/* Backup drops value before xmit as we may need it in error label */
	drops = cnt - sent;
	sent = dev->netdev_ops->ndo_xdp_xmit(dev, sent, bq->q, flags);
	if (sent < 0) {
		err = sent;
		sent = 0;
		goto error;
	}
out:
	drops = cnt - sent;
	bq->count = 0;

	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
	bq->dev_rx = NULL;
	bq->xdp_prog = NULL;
	__list_del_clearprev(&bq->flush_node);
	return;
error:
	/* If ndo_xdp_xmit fails with an errno, no frames have been
	 * xmit'ed and it's our responsibility to them free all.
	 */
	for (i = 0; i < cnt - drops; i++) {
		struct xdp_frame *xdpf = bq->q[i];

		xdp_return_frame_rx_napi(xdpf);
	}
	goto out;
}

Thanks
Hangbin
On Mon, 18 Jan 2021 18:07:17 +0800 Hangbin Liu <liuhangbin@gmail.com> wrote:

> On Sun, Jan 17, 2021 at 02:57:02PM -0800, John Fastabend wrote:
> [...]
> > It looks like we could embed xdp_buff in xdp_frame and then keep the metadata
> > at the end.
> >
> > Because you are working performance here wdyt? <- @Jesper as well.
>
> Leave this question to Jesper.

The struct xdp_buff is larger than struct xdp_frame. The size of xdp_frame
matters: it is a reserved area at the top of the frame. An XDP BPF program
cannot access this area (and it limits how much the headroom can grow).
This is why this code works, as afterwards the xdp_frame is still valid.

Looking at the code, xdp_update_frame_from_buff() does seem to update more
fields than actually needed.

> > >
> > > -	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
> > > +	if (unlikely(bq->xdp_prog)) {
> >
> > What's the rationale for making the above unlikely()? Seems for users it's
> > not unlikely. Can you measure a performance increase/decrease here? I think
> > it's probably fine to just let the compiler/prefetcher do its thing here. Or
> > I'm not reading this right, but it seems users of bq->xdp_prog would disagree
> > on the unlikely case?
> >
> > Either way a comment might be nice to give us some insight in 6 months
> > why we decided this is unlikely.
>
> I agree that there is no need to use unlikely() here.

I added the unlikely() to preserve the baseline performance when not having
the 2nd prog loaded. But I'm fine with removing that.
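As a side note on the annotation being debated: unlikely() in the kernel is
built on __builtin_expect(), which only influences how the compiler lays out
the branch, keeping the no-devmap-prog path as the straight fall-through. The
user-space sketch below uses hypothetical names (flush_queue, have_xdp_prog)
purely to illustrate that layout choice; it is not kernel code.

```c
#include <stdbool.h>
#include <stdio.h>

/* Roughly what the kernel macro expands to. */
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Stand-in for the flush path: the hinted branch is laid out out-of-line,
 * so the common "no devmap program" case falls straight through. */
static int flush_queue(bool have_xdp_prog, int cnt)
{
	if (unlikely(have_xdp_prog)) {
		/* run the devmap program first; frames it drops never
		 * reach the driver's transmit hook */
		printf("running devmap prog on %d frames\n", cnt);
	}
	return cnt;	/* frames handed to the driver for transmit */
}

int main(void)
{
	return flush_queue(false, 32) == 32 ? 0 : 1;
}
```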
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index f6e9c68afdd4..84fe15950e44 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -57,6 +57,7 @@ struct xdp_dev_bulk_queue {
 	struct list_head flush_node;
 	struct net_device *dev;
 	struct net_device *dev_rx;
+	struct bpf_prog *xdp_prog;
 	unsigned int count;
 };
 
@@ -327,40 +328,92 @@ bool dev_map_can_have_prog(struct bpf_map *map)
 	return false;
 }
 
+static int dev_map_bpf_prog_run(struct bpf_prog *xdp_prog,
+				struct xdp_frame **frames, int n,
+				struct net_device *dev)
+{
+	struct xdp_txq_info txq = { .dev = dev };
+	struct xdp_buff xdp;
+	int i, nframes = 0;
+
+	for (i = 0; i < n; i++) {
+		struct xdp_frame *xdpf = frames[i];
+		u32 act;
+		int err;
+
+		xdp_convert_frame_to_buff(xdpf, &xdp);
+		xdp.txq = &txq;
+
+		act = bpf_prog_run_xdp(xdp_prog, &xdp);
+		switch (act) {
+		case XDP_PASS:
+			err = xdp_update_frame_from_buff(&xdp, xdpf);
+			if (unlikely(err < 0))
+				xdp_return_frame_rx_napi(xdpf);
+			else
+				frames[nframes++] = xdpf;
+			break;
+		default:
+			bpf_warn_invalid_xdp_action(act);
+			fallthrough;
+		case XDP_ABORTED:
+			trace_xdp_exception(dev, xdp_prog, act);
+			fallthrough;
+		case XDP_DROP:
+			xdp_return_frame_rx_napi(xdpf);
+			break;
+		}
+	}
+	return n - nframes; /* dropped frames count */
+}
+
 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
 {
 	struct net_device *dev = bq->dev;
 	int sent = 0, drops = 0, err = 0;
+	unsigned int cnt = bq->count;
+	unsigned int xdp_drop;
 	int i;
 
-	if (unlikely(!bq->count))
+	if (unlikely(!cnt))
 		return;
 
-	for (i = 0; i < bq->count; i++) {
+	for (i = 0; i < cnt; i++) {
 		struct xdp_frame *xdpf = bq->q[i];
 
 		prefetch(xdpf);
 	}
 
-	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
+	if (unlikely(bq->xdp_prog)) {
+		xdp_drop = dev_map_bpf_prog_run(bq->xdp_prog, bq->q, cnt, dev);
+		cnt -= xdp_drop;
+		if (!cnt) {
+			sent = 0;
+			drops = xdp_drop;
+			goto out;
+		}
+	}
+
+	sent = dev->netdev_ops->ndo_xdp_xmit(dev, cnt, bq->q, flags);
 	if (sent < 0) {
 		err = sent;
 		sent = 0;
 		goto error;
 	}
-	drops = bq->count - sent;
+	drops = (cnt - sent) + xdp_drop;
 out:
 	bq->count = 0;
 
 	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
 	bq->dev_rx = NULL;
+	bq->xdp_prog = NULL;
 	__list_del_clearprev(&bq->flush_node);
 	return;
 error:
 	/* If ndo_xdp_xmit fails with an errno, no frames have been
 	 * xmit'ed and it's our responsibility to them free all.
 	 */
-	for (i = 0; i < bq->count; i++) {
+	for (i = 0; i < cnt; i++) {
 		struct xdp_frame *xdpf = bq->q[i];
 
 		xdp_return_frame_rx_napi(xdpf);
@@ -408,7 +461,7 @@ struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
  * Thus, safe percpu variable access.
  */
 static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
-		       struct net_device *dev_rx)
+		       struct net_device *dev_rx, struct bpf_prog *xdp_prog)
 {
 	struct list_head *flush_list = this_cpu_ptr(&dev_flush_list);
 	struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq);
@@ -423,6 +476,14 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 	if (!bq->dev_rx)
 		bq->dev_rx = dev_rx;
 
+	/* Store (potential) xdp_prog that run before egress to dev as
+	 * part of bulk_queue. This will be same xdp_prog for all
+	 * xdp_frame's in bulk_queue, because this per-CPU store must
+	 * be flushed from net_device drivers NAPI func end.
+	 */
+	if (!bq->xdp_prog)
+		bq->xdp_prog = xdp_prog;
+
 	bq->q[bq->count++] = xdpf;
 
 	if (!bq->flush_node.prev)
@@ -430,7 +491,8 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf,
 }
 
 static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
-				struct net_device *dev_rx)
+				struct net_device *dev_rx,
+				struct bpf_prog *xdp_prog)
 {
 	struct xdp_frame *xdpf;
 	int err;
@@ -446,42 +508,14 @@ static inline int __xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
 	if (unlikely(!xdpf))
 		return -EOVERFLOW;
 
-	bq_enqueue(dev, xdpf, dev_rx);
+	bq_enqueue(dev, xdpf, dev_rx, xdp_prog);
 	return 0;
 }
 
-static struct xdp_buff *dev_map_run_prog(struct net_device *dev,
-					 struct xdp_buff *xdp,
-					 struct bpf_prog *xdp_prog)
-{
-	struct xdp_txq_info txq = { .dev = dev };
-	u32 act;
-
-	xdp_set_data_meta_invalid(xdp);
-	xdp->txq = &txq;
-
-	act = bpf_prog_run_xdp(xdp_prog, xdp);
-	switch (act) {
-	case XDP_PASS:
-		return xdp;
-	case XDP_DROP:
-		break;
-	default:
-		bpf_warn_invalid_xdp_action(act);
-		fallthrough;
-	case XDP_ABORTED:
-		trace_xdp_exception(dev, xdp_prog, act);
-		break;
-	}
-
-	xdp_return_buff(xdp);
-	return NULL;
-}
-
 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
 		    struct net_device *dev_rx)
 {
-	return __xdp_enqueue(dev, xdp, dev_rx);
+	return __xdp_enqueue(dev, xdp, dev_rx, NULL);
 }
 
 int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
@@ -489,12 +523,7 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
 {
 	struct net_device *dev = dst->dev;
 
-	if (dst->xdp_prog) {
-		xdp = dev_map_run_prog(dev, xdp, dst->xdp_prog);
-		if (!xdp)
-			return 0;
-	}
-	return __xdp_enqueue(dev, xdp, dev_rx);
+	return __xdp_enqueue(dev, xdp, dev_rx, dst->xdp_prog);
 }
 
 int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
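For context on how bq->xdp_prog gets populated in the first place:
dst->xdp_prog comes from a program that user space stores in the devmap
entry (via struct bpf_devmap_val) when the map is updated, and that is the
program dev_map_bpf_prog_run() executes at flush time. Below is a hedged
sketch of such a devmap egress program; the "xdp_devmap" section name and
the attach convention depend on the libbpf version in use, so treat those
details as assumptions rather than part of this patch.

```c
// SPDX-License-Identifier: GPL-2.0
/* Sketch of a devmap-attached program of the kind dev_map_bpf_prog_run()
 * runs on each bulk-queued frame before ndo_xdp_xmit(). Such programs are
 * loaded with expected_attach_type BPF_XDP_DEVMAP and referenced from the
 * devmap value; the section name here is an assumed libbpf convention. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp_devmap")
int xdp_devmap_egress(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	/* Drop anything shorter than an Ethernet header; everything else
	 * is handed on to the driver's transmit hook by bq_xmit_all(). */
	if (data + 14 > data_end)
		return XDP_DROP;

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
```

A frame dropped here (XDP_DROP, or an invalid action) is returned via
xdp_return_frame_rx_napi() and counted in the drops reported by the
trace_xdp_devmap_xmit tracepoint, which is exactly the bookkeeping being
discussed in the review above.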