git.kernelconcepts.de Git - karo-tx-linux.git/commitdiff
x86/irq: Get rid of an indentation level
author Thomas Gleixner <tglx@linutronix.de>
Sun, 2 Aug 2015 20:38:25 +0000 (20:38 +0000)
committer Thomas Gleixner <tglx@linutronix.de>
Wed, 5 Aug 2015 22:14:59 +0000 (00:14 +0200)
Make the code simpler to read.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Bjorn Helgaas <bhelgaas@google.com>
Link: http://lkml.kernel.org/r/20150802203609.555253675@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/kernel/irq.c

index 931bdd2f975949ad1166d007187834dabd1d052e..140950fb9902df81cb72a683d233f34aa8464833 100644 (file)
@@ -342,47 +342,45 @@ int check_irq_vectors_for_cpu_disable(void)
        this_count = 0;
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                irq = __this_cpu_read(vector_irq[vector]);
-               if (irq >= 0) {
-                       desc = irq_to_desc(irq);
-                       if (!desc)
-                               continue;
+               if (irq < 0)
+                       continue;
+               desc = irq_to_desc(irq);
+               if (!desc)
+                       continue;
 
-                       /*
-                        * Protect against concurrent action removal,
-                        * affinity changes etc.
-                        */
-                       raw_spin_lock(&desc->lock);
-                       data = irq_desc_get_irq_data(desc);
-                       cpumask_copy(&affinity_new,
-                                    irq_data_get_affinity_mask(data));
-                       cpumask_clear_cpu(this_cpu, &affinity_new);
-
-                       /* Do not count inactive or per-cpu irqs. */
-                       if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
-                               raw_spin_unlock(&desc->lock);
-                               continue;
-                       }
+               /*
+                * Protect against concurrent action removal, affinity
+                * changes etc.
+                */
+               raw_spin_lock(&desc->lock);
+               data = irq_desc_get_irq_data(desc);
+               cpumask_copy(&affinity_new, irq_data_get_affinity_mask(data));
+               cpumask_clear_cpu(this_cpu, &affinity_new);
 
+               /* Do not count inactive or per-cpu irqs. */
+               if (!irq_has_action(irq) || irqd_is_per_cpu(data)) {
                        raw_spin_unlock(&desc->lock);
-                       /*
-                        * A single irq may be mapped to multiple
-                        * cpu's vector_irq[] (for example IOAPIC cluster
-                        * mode).  In this case we have two
-                        * possibilities:
-                        *
-                        * 1) the resulting affinity mask is empty; that is
-                        * this the down'd cpu is the last cpu in the irq's
-                        * affinity mask, or
-                        *
-                        * 2) the resulting affinity mask is no longer
-                        * a subset of the online cpus but the affinity
-                        * mask is not zero; that is the down'd cpu is the
-                        * last online cpu in a user set affinity mask.
-                        */
-                       if (cpumask_empty(&affinity_new) ||
-                           !cpumask_subset(&affinity_new, &online_new))
-                               this_count++;
+                       continue;
                }
+
+               raw_spin_unlock(&desc->lock);
+               /*
+                * A single irq may be mapped to multiple cpu's
+                * vector_irq[] (for example IOAPIC cluster mode).  In
+                * this case we have two possibilities:
+                *
+                * 1) the resulting affinity mask is empty; that is
+                * this the down'd cpu is the last cpu in the irq's
+                * affinity mask, or
+                *
+                * 2) the resulting affinity mask is no longer a
+                * subset of the online cpus but the affinity mask is
+                * not zero; that is the down'd cpu is the last online
+                * cpu in a user set affinity mask.
+                */
+               if (cpumask_empty(&affinity_new) ||
+                   !cpumask_subset(&affinity_new, &online_new))
+                       this_count++;
        }
 
        count = 0;