net: Rewrite netif_reset_xps_queue to allow for better code reuse
author    Alexander Duyck <alexander.h.duyck@intel.com>
Thu, 10 Jan 2013 08:57:17 +0000 (08:57 +0000)
committer David S. Miller <davem@davemloft.net>
Fri, 11 Jan 2013 06:47:04 +0000 (22:47 -0800)
This patch does a minor refactor of netif_reset_xps_queue to address two
items I noticed.

First, queue removal is currently done in both netif_reset_xps_queue and
netif_set_xps_queue.  Since there is no need to have that code in two
places, I am pulling it out into a separate function, remove_xps_queue,
and will come back in another patch to reuse it in netif_set_xps_queue.
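
As a rough sketch only (not part of this patch), the reuse in
netif_set_xps_queue could end up looking something like the loop below;
the mask handling and the surrounding variables are assumptions made
purely for illustration:

        for_each_possible_cpu(cpu) {
                /* CPUs that stay in the requested mask keep the queue */
                if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
                        continue;

                /* non-NULL return means the CPU still maps other queues */
                if (remove_xps_queue(dev_maps, cpu, index))
                        active = true;
        }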

The second item this change addresses is that the Tx queue was not
getting its numa_node value cleared as part of the XPS queue reset.
This patch resolves that by resetting the numa_node to NUMA_NO_NODE
whenever dev_maps is set.
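
The clear itself is the single call added at the end of
netif_reset_xps_queue.  A simplified sketch of the set/clear pairing is
below; the set-path line is only an illustration, as the real set path
derives the node from the full set of mapped CPUs and is not changed
here:

        /* set path (simplified): record a node hint for the mapped CPU */
        netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
                                     cpu_to_node(cpu));

        /* reset path (this patch): drop the hint once the maps are gone */
        netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
                                     NUMA_NO_NODE);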

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/net/core/dev.c b/net/core/dev.c
index 257b29516f69c85ee7ca3e1f272f812af65dd5c6..231de8738149ba096b428433325a9eac871a5574 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1862,45 +1862,55 @@ static DEFINE_MUTEX(xps_map_mutex);
 #define xmap_dereference(P)            \
        rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
 
-void netif_reset_xps_queue(struct net_device *dev, u16 index)
+static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
+                                       int cpu, u16 index)
 {
-       struct xps_dev_maps *dev_maps;
-       struct xps_map *map;
-       int i, pos, nonempty = 0;
-
-       mutex_lock(&xps_map_mutex);
-       dev_maps = xmap_dereference(dev->xps_maps);
-
-       if (!dev_maps)
-               goto out_no_maps;
+       struct xps_map *map = NULL;
+       int pos;
 
-       for_each_possible_cpu(i) {
-               map = xmap_dereference(dev_maps->cpu_map[i]);
-               if (!map)
-                       continue;
-
-               for (pos = 0; pos < map->len; pos++)
-                       if (map->queues[pos] == index)
-                               break;
+       if (dev_maps)
+               map = xmap_dereference(dev_maps->cpu_map[cpu]);
 
-               if (pos < map->len) {
+       for (pos = 0; map && pos < map->len; pos++) {
+               if (map->queues[pos] == index) {
                        if (map->len > 1) {
                                map->queues[pos] = map->queues[--map->len];
                        } else {
-                               RCU_INIT_POINTER(dev_maps->cpu_map[i], NULL);
+                               RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
                                kfree_rcu(map, rcu);
                                map = NULL;
                        }
+                       break;
                }
-               if (map)
-                       nonempty = 1;
        }
 
-       if (!nonempty) {
+       return map;
+}
+
+void netif_reset_xps_queue(struct net_device *dev, u16 index)
+{
+       struct xps_dev_maps *dev_maps;
+       int cpu;
+       bool active = false;
+
+       mutex_lock(&xps_map_mutex);
+       dev_maps = xmap_dereference(dev->xps_maps);
+
+       if (!dev_maps)
+               goto out_no_maps;
+
+       for_each_possible_cpu(cpu) {
+               if (remove_xps_queue(dev_maps, cpu, index))
+                       active = true;
+       }
+
+       if (!active) {
                RCU_INIT_POINTER(dev->xps_maps, NULL);
                kfree_rcu(dev_maps, rcu);
        }
 
+       netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
+                                    NUMA_NO_NODE);
 out_no_maps:
        mutex_unlock(&xps_map_mutex);
 }