| Message ID | 20221002151702.3932770-4-yury.norov@gmail.com (mailing list archive) |
|---|---|
| State | Superseded |
| Delegated to | Netdev Maintainers |
| Series | net: drop netif_attrmask_next*() |
| Context | Check | Description |
|---|---|---|
| netdev/tree_selection | success | Guessing tree name failed - patch did not apply, async |
On Sun, Oct 02, 2022 at 08:17:01AM -0700, Yury Norov wrote:
> If the mask is initialized unconditionally, it's possible to use bitmap
> API to traverse it, which is done in the following patch.
>
> Signed-off-by: Yury Norov <yury.norov@gmail.com>
> ---
>  net/core/dev.c | 21 ++++++++++++---------
>  1 file changed, 12 insertions(+), 9 deletions(-)
>
> diff --git a/net/core/dev.c b/net/core/dev.c
> index 39a4cc7b3a06..266378ad1cf1 100644
> --- a/net/core/dev.c
> +++ b/net/core/dev.c
> @@ -2542,7 +2542,7 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
>  			  u16 index, enum xps_map_type type)
>  {
>  	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
> -	const unsigned long *online_mask = NULL;
> +	const unsigned long *online_mask;
>  	bool active = false, copy = false;
>  	int i, j, tci, numa_node_id = -2;
>  	int maps_sz, num_tc = 1, tc = 0;
> @@ -2565,9 +2565,11 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
>
>  	if (type == XPS_RXQS) {
>  		nr_ids = dev->num_rx_queues;
> +		online_mask = bitmap_alloc(nr_ids, GFP_KERNEL);
> +		if (!online_mask)
> +			return -ENOMEM;

Oh god, I missed a line here while preparing the patch. It should be:

+		online_mask = bitmap_alloc(nr_ids, GFP_KERNEL);
+		if (!online_mask)
+			return -ENOMEM;
+		bitmap_fill(online_mask, nr_ids);

I'll send v2 after collecting the comments.

>  	} else {
> -		if (num_possible_cpus() > 1)
> -			online_mask = cpumask_bits(cpu_online_mask);
> +		online_mask = cpumask_bits(cpu_online_mask);
>  		nr_ids = nr_cpu_ids;
>  	}
>
> @@ -2593,10 +2595,8 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
>  	     j < nr_ids;) {
>  		if (!new_dev_maps) {
>  			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
> -			if (!new_dev_maps) {
> -				mutex_unlock(&xps_map_mutex);
> -				return -ENOMEM;
> -			}
> +			if (!new_dev_maps)
> +				goto err_out;
>
>  			new_dev_maps->nr_ids = nr_ids;
>  			new_dev_maps->num_tc = num_tc;
> @@ -2718,7 +2718,8 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
>
>  out_no_maps:
>  	mutex_unlock(&xps_map_mutex);
> -
> +	if (type == XPS_RXQS)
> +		bitmap_free(online_mask);
>  	return 0;
>  error:
>  	/* remove any maps that we added */
> @@ -2733,8 +2734,10 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
>  		}
>  	}
>
> +err_out:
>  	mutex_unlock(&xps_map_mutex);
> -
> +	if (type == XPS_RXQS)
> +		bitmap_free(online_mask);
>  	kfree(new_dev_maps);
>  	return -ENOMEM;
>  }
> --
> 2.34.1
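For reference, folding that correction into the quoted hunk, the XPS_RXQS branch in v2 would presumably read roughly as follows (a sketch assembled from the hunks in this thread; only the bitmap_fill() line is new relative to this posting):

```c
	if (type == XPS_RXQS) {
		nr_ids = dev->num_rx_queues;
		/* Allocate a local mask with all bits set, so both branches
		 * hand a valid bitmap to the traversal code. */
		online_mask = bitmap_alloc(nr_ids, GFP_KERNEL);
		if (!online_mask)
			return -ENOMEM;
		bitmap_fill(online_mask, nr_ids);	/* the line missing from v1 */
	} else {
		online_mask = cpumask_bits(cpu_online_mask);
		nr_ids = nr_cpu_ids;
	}
```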
diff --git a/net/core/dev.c b/net/core/dev.c
index 39a4cc7b3a06..266378ad1cf1 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2542,7 +2542,7 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 			  u16 index, enum xps_map_type type)
 {
 	struct xps_dev_maps *dev_maps, *new_dev_maps = NULL, *old_dev_maps = NULL;
-	const unsigned long *online_mask = NULL;
+	const unsigned long *online_mask;
 	bool active = false, copy = false;
 	int i, j, tci, numa_node_id = -2;
 	int maps_sz, num_tc = 1, tc = 0;
@@ -2565,9 +2565,11 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 
 	if (type == XPS_RXQS) {
 		nr_ids = dev->num_rx_queues;
+		online_mask = bitmap_alloc(nr_ids, GFP_KERNEL);
+		if (!online_mask)
+			return -ENOMEM;
 	} else {
-		if (num_possible_cpus() > 1)
-			online_mask = cpumask_bits(cpu_online_mask);
+		online_mask = cpumask_bits(cpu_online_mask);
 		nr_ids = nr_cpu_ids;
 	}
 
@@ -2593,10 +2595,8 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 	     j < nr_ids;) {
 		if (!new_dev_maps) {
 			new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
-			if (!new_dev_maps) {
-				mutex_unlock(&xps_map_mutex);
-				return -ENOMEM;
-			}
+			if (!new_dev_maps)
+				goto err_out;
 
 			new_dev_maps->nr_ids = nr_ids;
 			new_dev_maps->num_tc = num_tc;
@@ -2718,7 +2718,8 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 
 out_no_maps:
 	mutex_unlock(&xps_map_mutex);
-
+	if (type == XPS_RXQS)
+		bitmap_free(online_mask);
 	return 0;
 error:
 	/* remove any maps that we added */
@@ -2733,8 +2734,10 @@ int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
 		}
 	}
 
+err_out:
 	mutex_unlock(&xps_map_mutex);
-
+	if (type == XPS_RXQS)
+		bitmap_free(online_mask);
 	kfree(new_dev_maps);
 	return -ENOMEM;
 }
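The ownership rule the patch introduces is worth spelling out: for XPS_RXQS the mask is now allocated locally and must be released on every exit path, while for XPS_CPUS it merely points at cpu_online_mask and must never be freed. Below is a condensed, hypothetical skeleton of the resulting control flow (not the real function; the name xps_skeleton and the maps_sz parameter are illustrative, and the map-building loop is elided):

```c
static int xps_skeleton(struct net_device *dev, enum xps_map_type type,
			size_t maps_sz)
{
	struct xps_dev_maps *new_dev_maps = NULL;
	const unsigned long *online_mask;
	unsigned int nr_ids;

	if (type == XPS_RXQS) {
		/* Owned: allocated here, freed on every exit path below. */
		nr_ids = dev->num_rx_queues;
		online_mask = bitmap_alloc(nr_ids, GFP_KERNEL);
		if (!online_mask)
			return -ENOMEM;
	} else {
		/* Borrowed: points into cpu_online_mask, never freed here. */
		online_mask = cpumask_bits(cpu_online_mask);
		nr_ids = nr_cpu_ids;
	}

	mutex_lock(&xps_map_mutex);

	new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
	if (!new_dev_maps)
		goto err_out;	/* drop the mutex and the owned mask */

	/* ... populate new_dev_maps for each id set in online_mask ... */

	mutex_unlock(&xps_map_mutex);
	if (type == XPS_RXQS)
		bitmap_free(online_mask);
	return 0;

err_out:
	mutex_unlock(&xps_map_mutex);
	if (type == XPS_RXQS)
		bitmap_free(online_mask);
	kfree(new_dev_maps);
	return -ENOMEM;
}
```

This is why both the success path after out_no_maps and the new err_out path end with the same conditional bitmap_free().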
If the mask is initialized unconditionally, it's possible to use bitmap
API to traverse it, which is done in the following patch.

Signed-off-by: Yury Norov <yury.norov@gmail.com>
---
 net/core/dev.c | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)
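The follow-up patch that does the traversal is not quoted in this message, but as a rough illustration of what "use bitmap API to traverse it" can mean once online_mask is guaranteed to be a valid nr_ids-bit bitmap for both map types, a generic iterator such as for_each_set_bit() could replace the open-coded netif_attrmask_next() loop (hypothetical sketch, not the actual next patch):

```c
	unsigned int j;

	/* Hypothetical sketch: walk every id whose bit is set in
	 * online_mask. The real conversion lives in the next patch
	 * of the series and may differ in detail. */
	for_each_set_bit(j, online_mask, nr_ids) {
		/* process CPU or RX-queue id j */
	}
```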