git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
drivers: fix up obsolete cpu function usage.
author     Rusty Russell <rusty@rustcorp.com.au>
           Thu, 5 Mar 2015 00:19:16 +0000 (10:49 +1030)
committer  Rusty Russell <rusty@rustcorp.com.au>
           Thu, 5 Mar 2015 03:07:02 +0000 (13:37 +1030)
Thanks to spatch, plus manual removal of "&*".  Then a sweep for
for_each_cpu_mask => for_each_cpu.
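
For reference, a minimal sketch of the old-to-new cpumask mapping the
conversion applies. The function below is hypothetical and not part of the
patch; it only illustrates each replaced helper. The obsolete helpers took a
cpumask_t by value, while the current cpumask_* API takes a struct cpumask
pointer, which is why the hunks below gain a '&' (or drop a '*'):

#include <linux/cpumask.h>

/* Illustrative sketch only, not part of this commit. */
static void cpumask_conversion_sketch(void)
{
	cpumask_t mask;
	int cpu;

	cpumask_clear(&mask);           /* was: cpus_clear(mask) */
	cpumask_set_cpu(0, &mask);      /* was: cpu_set(0, mask) */

	if (cpumask_empty(&mask))       /* was: cpus_empty(mask) */
		return;

	pr_debug("first %u, weight %u, cpu0 set %d\n",
		 cpumask_first(&mask),        /* was: first_cpu(mask) */
		 cpumask_weight(&mask),       /* was: cpus_weight(mask) */
		 cpumask_test_cpu(0, &mask)); /* was: cpu_isset(0, mask) */

	for_each_cpu(cpu, &mask)        /* was: for_each_cpu_mask(cpu, mask) */
		pr_debug("cpu %d is set\n", cpu);
}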

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Jason Cooper <jason@lakedaemon.net>
Cc: Chris Metcalf <cmetcalf@ezchip.com>
Cc: netdev@vger.kernel.org
drivers/clocksource/dw_apb_timer.c
drivers/cpuidle/coupled.c
drivers/crypto/n2_core.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-mips-gic.c
drivers/net/ethernet/tile/tilegx.c

diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c
index f3656a6b0382c9982563e0abe84054e98a67dc98..35a88097af3c0daef2918c8e93585b004e268623 100644
@@ -117,7 +117,8 @@ static void apbt_set_mode(enum clock_event_mode mode,
        unsigned long period;
        struct dw_apb_clock_event_device *dw_ced = ced_to_dw_apb_ced(evt);
 
-       pr_debug("%s CPU %d mode=%d\n", __func__, first_cpu(*evt->cpumask),
+       pr_debug("%s CPU %d mode=%d\n", __func__,
+                cpumask_first(evt->cpumask),
                 mode);
 
        switch (mode) {
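
(This hunk is where the "manual removal of "&*"" mentioned above was needed:
a purely mechanical first_cpu(E) => cpumask_first(&E) rewrite of
first_cpu(*evt->cpumask) would presumably have produced
cpumask_first(&*evt->cpumask), which was then simplified by hand to
cpumask_first(evt->cpumask).)
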
diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c
index 73fe2f8d7f961dad2c227dca5c32f0080e2b8604..7936dce4b8786f0ef00d2246a39d4d6692748e40 100644
@@ -292,7 +292,7 @@ static inline int cpuidle_coupled_get_state(struct cpuidle_device *dev,
         */
        smp_rmb();
 
-       for_each_cpu_mask(i, coupled->coupled_cpus)
+       for_each_cpu(i, &coupled->coupled_cpus)
                if (cpu_online(i) && coupled->requested_state[i] < state)
                        state = coupled->requested_state[i];
 
@@ -338,7 +338,7 @@ static void cpuidle_coupled_poke_others(int this_cpu,
 {
        int cpu;
 
-       for_each_cpu_mask(cpu, coupled->coupled_cpus)
+       for_each_cpu(cpu, &coupled->coupled_cpus)
                if (cpu != this_cpu && cpu_online(cpu))
                        cpuidle_coupled_poke(cpu);
 }
@@ -638,7 +638,7 @@ int cpuidle_coupled_register_device(struct cpuidle_device *dev)
        if (cpumask_empty(&dev->coupled_cpus))
                return 0;
 
-       for_each_cpu_mask(cpu, dev->coupled_cpus) {
+       for_each_cpu(cpu, &dev->coupled_cpus) {
                other_dev = per_cpu(cpuidle_devices, cpu);
                if (other_dev && other_dev->coupled) {
                        coupled = other_dev->coupled;
diff --git a/drivers/crypto/n2_core.c b/drivers/crypto/n2_core.c
index afd136b45f49b1fa0a933cd4a33412f830298bcb..10a9aeff1666ed69bc0dac693a0cdf7b553f225d 100644
@@ -1754,7 +1754,7 @@ static int spu_mdesc_walk_arcs(struct mdesc_handle *mdesc,
                                dev->dev.of_node->full_name);
                        return -EINVAL;
                }
-               cpu_set(*id, p->sharing);
+               cpumask_set_cpu(*id, &p->sharing);
                table[*id] = p;
        }
        return 0;
@@ -1776,7 +1776,7 @@ static int handle_exec_unit(struct spu_mdesc_info *ip, struct list_head *list,
                return -ENOMEM;
        }
 
-       cpus_clear(p->sharing);
+       cpumask_clear(&p->sharing);
        spin_lock_init(&p->lock);
        p->q_type = q_type;
        INIT_LIST_HEAD(&p->jobs);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 1c6dea2fbc34ce2d7b00007250111166315d8b78..04b6f0732c1afcb1aa9d20cbe0d6649c02bbe822 100644
@@ -512,7 +512,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
         */
        smp_wmb();
 
-       for_each_cpu_mask(cpu, *mask) {
+       for_each_cpu(cpu, mask) {
                u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
                u16 tlist;
 
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 9acdc080e7ecd21b2cd256c18e31c9c27337c7d1..f26307908a2a6bdcb7be537c6ceae6b3db050ef4 100644
@@ -345,19 +345,19 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
        int             i;
 
        cpumask_and(&tmp, cpumask, cpu_online_mask);
-       if (cpus_empty(tmp))
+       if (cpumask_empty(&tmp))
                return -EINVAL;
 
        /* Assumption : cpumask refers to a single CPU */
        spin_lock_irqsave(&gic_lock, flags);
 
        /* Re-route this IRQ */
-       gic_map_to_vpe(irq, first_cpu(tmp));
+       gic_map_to_vpe(irq, cpumask_first(&tmp));
 
        /* Update the pcpu_masks */
        for (i = 0; i < NR_CPUS; i++)
                clear_bit(irq, pcpu_masks[i].pcpu_mask);
-       set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+       set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
 
        cpumask_copy(d->affinity, cpumask);
        spin_unlock_irqrestore(&gic_lock, flags);
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index bea8cd2bb56cf85b92132ea69ea90f08e6dfc5f7..deac41498c6e8c9a059dd5eb6933edaf7514ca89 100644
@@ -1122,7 +1122,7 @@ static int alloc_percpu_mpipe_resources(struct net_device *dev,
                        addr + i * sizeof(struct tile_net_comps);
 
        /* If this is a network cpu, create an iqueue. */
-       if (cpu_isset(cpu, network_cpus_map)) {
+       if (cpumask_test_cpu(cpu, &network_cpus_map)) {
                order = get_order(NOTIF_RING_SIZE);
                page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
                if (page == NULL) {
@@ -1298,7 +1298,7 @@ static int tile_net_init_mpipe(struct net_device *dev)
        int first_ring, ring;
        int instance = mpipe_instance(dev);
        struct mpipe_data *md = &mpipe_data[instance];
-       int network_cpus_count = cpus_weight(network_cpus_map);
+       int network_cpus_count = cpumask_weight(&network_cpus_map);
 
        if (!hash_default) {
                netdev_err(dev, "Networking requires hash_default!\n");