genirq: Remove mask argument from setup_affinity()
author     Thomas Gleixner <tglx@linutronix.de>
           Mon, 19 Jun 2017 23:37:21 +0000 (01:37 +0200)
committer  Thomas Gleixner <tglx@linutronix.de>
           Thu, 22 Jun 2017 16:21:14 +0000 (18:21 +0200)
No point in having this alloc/free dance of cpumasks. Provide a static mask
for setup_affinity() and protect it properly.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/20170619235444.851571573@linutronix.de
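
The gist of the change is the pattern sketched below: a single static scratch mask guarded by a lock replaces a per-call cpumask allocation. This is a minimal userspace C illustration, not part of the patch; the names (struct mask, apply_affinity) are made up, and a pthread mutex stands in for the kernel's raw spinlock.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Toy fixed-size bitmask standing in for struct cpumask. */
#define MASK_WORDS 4
struct mask { unsigned long bits[MASK_WORDS]; };

/*
 * One static scratch mask plus a lock, mirroring the
 * static DEFINE_RAW_SPINLOCK(mask_lock) / static struct cpumask mask
 * pair introduced in irq_setup_affinity() in the diff below.
 */
static pthread_mutex_t mask_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mask scratch;

static int apply_affinity(const struct mask *set, const struct mask *online)
{
	size_t i;

	pthread_mutex_lock(&mask_lock);
	/* Build the effective mask in the shared scratch buffer. */
	for (i = 0; i < MASK_WORDS; i++)
		scratch.bits[i] = set->bits[i] & online->bits[i];
	/*
	 * The consumer would be handed &scratch while the lock is held,
	 * just as &mask is passed to irq_do_set_affinity() in the patch.
	 */
	pthread_mutex_unlock(&mask_lock);
	return 0;
}

int main(void)
{
	struct mask set, online;

	memset(&set, 0xff, sizeof(set));
	memset(&online, 0x0f, sizeof(online));
	printf("apply_affinity() -> %d\n", apply_affinity(&set, &online));
	return 0;
}
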
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/proc.c

diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 094db5bfb83fc3abe40fb08cc20b4069e78b6aaa..33ca83816b8c87620b7d6ed22cf5adf58b744d1b 100644
@@ -109,7 +109,7 @@ static inline void unregister_handler_proc(unsigned int irq,
 
 extern bool irq_can_set_affinity_usr(unsigned int irq);
 
-extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
+extern int irq_select_affinity_usr(unsigned int irq);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 284f4eb1ffbec50fd8d25075b982407bfdec8279..e2f20d553d600fee8457a13ab72efd9207d4feab 100644
@@ -345,15 +345,18 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 /*
  * Generic version of the affinity autoselector.
  */
-static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
+static int irq_setup_affinity(struct irq_desc *desc)
 {
        struct cpumask *set = irq_default_affinity;
-       int node = irq_desc_get_node(desc);
+       int ret, node = irq_desc_get_node(desc);
+       static DEFINE_RAW_SPINLOCK(mask_lock);
+       static struct cpumask mask;
 
        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!__irq_can_set_affinity(desc))
                return 0;
 
+       raw_spin_lock(&mask_lock);
        /*
         * Preserve the managed affinity setting and a userspace affinity
         * setup, but make sure that one of the targets is online.
@@ -367,43 +370,42 @@ static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
                        irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
        }
 
-       cpumask_and(mask, cpu_online_mask, set);
+       cpumask_and(&mask, cpu_online_mask, set);
        if (node != NUMA_NO_NODE) {
                const struct cpumask *nodemask = cpumask_of_node(node);
 
                /* make sure at least one of the cpus in nodemask is online */
-               if (cpumask_intersects(mask, nodemask))
-                       cpumask_and(mask, mask, nodemask);
+               if (cpumask_intersects(&mask, nodemask))
+                       cpumask_and(&mask, &mask, nodemask);
        }
-       irq_do_set_affinity(&desc->irq_data, mask, false);
-       return 0;
+       ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
+       raw_spin_unlock(&mask_lock);
+       return ret;
 }
 #else
 /* Wrapper for ALPHA specific affinity selector magic */
-static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
+int irq_setup_affinity(struct irq_desc *desc)
 {
-       return irq_select_affinity(irq_desc_get_irq(d));
+       return irq_select_affinity(irq_desc_get_irq(desc));
 }
 #endif
 
 /*
- * Called when affinity is set via /proc/irq
+ * Called when a bogus affinity is set via /proc/irq
  */
-int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
+int irq_select_affinity_usr(unsigned int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;
 
        raw_spin_lock_irqsave(&desc->lock, flags);
-       ret = setup_affinity(desc, mask);
+       ret = irq_setup_affinity(desc);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
 }
-
 #else
-static inline int
-setup_affinity(struct irq_desc *desc, struct cpumask *mask)
+static inline int irq_setup_affinity(struct irq_desc *desc)
 {
        return 0;
 }
@@ -1128,7 +1130,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        struct irqaction *old, **old_ptr;
        unsigned long flags, thread_mask = 0;
        int ret, nested, shared = 0;
-       cpumask_var_t mask;
 
        if (!desc)
                return -EINVAL;
@@ -1187,11 +1188,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                }
        }
 
-       if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
-               ret = -ENOMEM;
-               goto out_thread;
-       }
-
        /*
         * Drivers are often written to work w/o knowledge about the
         * underlying irq chip implementation, so a request for a
@@ -1256,7 +1252,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                 */
                if (thread_mask == ~0UL) {
                        ret = -EBUSY;
-                       goto out_mask;
+                       goto out_unlock;
                }
                /*
                 * The thread_mask for the action is or'ed to
@@ -1300,7 +1296,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
                       irq);
                ret = -EINVAL;
-               goto out_mask;
+               goto out_unlock;
        }
 
        if (!shared) {
@@ -1308,7 +1304,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                if (ret) {
                        pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
                               new->name, irq, desc->irq_data.chip->name);
-                       goto out_mask;
+                       goto out_unlock;
                }
 
                init_waitqueue_head(&desc->wait_for_threads);
@@ -1320,7 +1316,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
                        if (ret) {
                                irq_release_resources(desc);
-                               goto out_mask;
+                               goto out_unlock;
                        }
                }
 
@@ -1357,7 +1353,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
                }
 
                /* Set default affinity mask once everything is setup */
-               setup_affinity(desc, mask);
+               irq_setup_affinity(desc);
 
        } else if (new->flags & IRQF_TRIGGER_MASK) {
                unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
@@ -1401,8 +1397,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        irq_add_debugfs_entry(irq, desc);
        new->dir = NULL;
        register_handler_proc(irq, new);
-       free_cpumask_var(mask);
-
        return 0;
 
 mismatch:
@@ -1415,9 +1409,8 @@ mismatch:
        }
        ret = -EBUSY;
 
-out_mask:
+out_unlock:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
-       free_cpumask_var(mask);
 
 out_thread:
        if (new->thread) {
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index c53edad7b459d99e8dd98dccab743bf86c2023a2..d35bb8d4c31794ff39e2b555abb9724e8a702885 100644
@@ -120,9 +120,11 @@ static ssize_t write_irq_affinity(int type, struct file *file,
         * one online CPU still has to be targeted.
         */
        if (!cpumask_intersects(new_value, cpu_online_mask)) {
-               /* Special case for empty set - allow the architecture
-                  code to set default SMP affinity. */
-               err = irq_select_affinity_usr(irq, new_value) ? -EINVAL : count;
+               /*
+                * Special case for empty set - allow the architecture code
+                * to set default SMP affinity.
+                */
+               err = irq_select_affinity_usr(irq) ? -EINVAL : count;
        } else {
                irq_set_affinity(irq, new_value);
                err = count;
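
The empty-set branch above is what userspace hits when it writes a mask containing no online CPU to the per-IRQ proc file. A hedged userspace C example follows; the IRQ number 42 is made up (pick a real one from /proc/interrupts and run as root), and per the err assignment above the write returns either the byte count or -EINVAL, depending on whether the default affinity selection succeeds.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical IRQ number; substitute one from /proc/interrupts. */
	const char *path = "/proc/irq/42/smp_affinity";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	/*
	 * "0" parses to an empty cpumask, so it cannot intersect
	 * cpu_online_mask and write_irq_affinity() falls back to
	 * irq_select_affinity_usr() as shown in the hunk above.
	 */
	if (write(fd, "0\n", 2) < 0)
		perror("write");
	close(fd);
	return 0;
}
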