[S390] outstanding interrupts vs. smp_send_stop
author    Martin Schwidefsky <schwidefsky@de.ibm.com>
          Tue, 27 Dec 2011 10:27:22 +0000 (11:27 +0100)
committer Martin Schwidefsky <schwidefsky@de.ibm.com>
          Tue, 27 Dec 2011 10:27:13 +0000 (11:27 +0100)
The panic function will first print the panic message to the console,
then stop the additional cpus with smp_send_stop, and finally call the
functions on the panic notifier list.
In case of an I/O based console the panic message will cause I/O to
be started, and a function on the panic notifier list will wait for
the completion of that I/O. This wait can never finish if an I/O
completion interrupt has already been delivered to a cpu that is then
stopped by smp_send_stop.
To break this cyclic dependency add code to smp_send_stop that gives
the additional cpus the opportunity to complete their outstanding
interrupts before they are stopped.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
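
The core of the change, condensed: smp_send_stop becomes a two-phase
stop. In a panic (oops_in_progress) the remaining cpus are first asked
to stop themselves via an emergency-signal external interrupt, so a cpu
that already has an I/O completion interrupt pending can still deliver
and handle it before parking; only after a bounded wait are the
stragglers stopped unconditionally. The sketch below restates the
patched function with the two phases spelled out; it is illustration,
not a drop-in replacement. sigp(), cpu_stopped(), lowcore_ptr and
get_clock() are s390 internals that are in scope in
arch/s390/kernel/smp.c. The one-second bound works because the s390
TOD clock returned by get_clock() advances by 4096 (1 << 12) units per
microsecond, so 1000000UL << 12 is one second.

    /* Condensed sketch of the patched smp_send_stop. The interrupt
     * disabling and the oops_in_progress check of the real code are
     * omitted for brevity.
     */
    static void smp_send_stop_sketch(void)
    {
            cpumask_t cpumask;
            u64 end;
            int cpu;

            cpumask_copy(&cpumask, cpu_online_mask);
            cpumask_clear_cpu(smp_processor_id(), &cpumask);

            /* Phase 1: raise ec_stop_cpu and send an emergency signal.
             * The target's external interrupt handler sees the bit and
             * parks the cpu itself, after pending interrupts (e.g. the
             * console I/O completion) have been handled.
             */
            end = get_clock() + (1000000UL << 12);  /* one second in TOD units */
            for_each_cpu(cpu, &cpumask) {
                    set_bit(ec_stop_cpu, (unsigned long *)
                            &lowcore_ptr[cpu]->ext_call_fast);
                    while (sigp(cpu, sigp_emergency_signal) == sigp_busy &&
                           get_clock() < end)
                            cpu_relax();
            }

            /* Wait for the targets to park themselves, up to the deadline. */
            while (get_clock() < end && !cpumask_empty(&cpumask)) {
                    for_each_cpu(cpu, &cpumask)
                            if (cpu_stopped(cpu))
                                    cpumask_clear_cpu(cpu, &cpumask);
                    cpu_relax();
            }

            /* Phase 2: unconditional sigp stop for anything still running. */
            for_each_cpu(cpu, &cpumask) {
                    while (sigp(cpu, sigp_stop) == sigp_busy)
                            cpu_relax();
                    while (!cpu_stopped(cpu))
                            cpu_relax();
            }
    }

Using an emergency signal rather than an immediate sigp_stop in phase 1
is the point of the fix: a stopped cpu can no longer take the pending
I/O completion interrupt, while the external interrupt lets the target
run its handler (including the new ec_stop_cpu case) before it stops.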
arch/s390/include/asm/sigp.h
arch/s390/kernel/smp.c

index e3bffd4e2d6662b08e8dab76cd4dc924508ead3d..7040b8567cd05d89f8d5f0da87b998b6880ba09d 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -56,6 +56,7 @@ enum {
        ec_schedule = 0,
        ec_call_function,
        ec_call_function_single,
+       ec_stop_cpu,
 };
 
 /*
index 8aba77df68a9e8337f1aba29e9d98bca30fc6d3d..b1cd3293671237986ccd9b19dbcd464bcab7430a 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -154,22 +154,52 @@ void smp_switch_to_ipl_cpu(void (*func)(void *), void *data)
        smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]);
 }
 
+static void smp_stop_cpu(void)
+{
+       while (sigp(smp_processor_id(), sigp_stop) == sigp_busy)
+               cpu_relax();
+}
+
 void smp_send_stop(void)
 {
-       int cpu, rc;
+       cpumask_t cpumask;
+       int cpu;
+       u64 end;
 
        /* Disable all interrupts/machine checks */
        __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
        trace_hardirqs_off();
 
-       /* stop all processors */
-       for_each_online_cpu(cpu) {
-               if (cpu == smp_processor_id())
-                       continue;
-               do {
-                       rc = sigp(cpu, sigp_stop);
-               } while (rc == sigp_busy);
+       cpumask_copy(&cpumask, cpu_online_mask);
+       cpumask_clear_cpu(smp_processor_id(), &cpumask);
+
+       if (oops_in_progress) {
+               /*
+                * Give the other cpus the opportunity to complete
+                * outstanding interrupts before stopping them.
+                */
+               end = get_clock() + (1000000UL << 12);
+               for_each_cpu(cpu, &cpumask) {
+                       set_bit(ec_stop_cpu, (unsigned long *)
+                               &lowcore_ptr[cpu]->ext_call_fast);
+                       while (sigp(cpu, sigp_emergency_signal) == sigp_busy &&
+                              get_clock() < end)
+                               cpu_relax();
+               }
+               while (get_clock() < end) {
+                       for_each_cpu(cpu, &cpumask)
+                               if (cpu_stopped(cpu))
+                                       cpumask_clear_cpu(cpu, &cpumask);
+                       if (cpumask_empty(&cpumask))
+                               break;
+                       cpu_relax();
+               }
+       }
 
+       /* stop all processors */
+       for_each_cpu(cpu, &cpumask) {
+               while (sigp(cpu, sigp_stop) == sigp_busy)
+                       cpu_relax();
                while (!cpu_stopped(cpu))
                        cpu_relax();
        }
@@ -194,6 +224,9 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
         */
        bits = xchg(&S390_lowcore.ext_call_fast, 0);
 
+       if (test_bit(ec_stop_cpu, &bits))
+               smp_stop_cpu();
+
        if (test_bit(ec_schedule, &bits))
                scheduler_ipi();
 
@@ -202,6 +235,7 @@ static void do_ext_call_interrupt(unsigned int ext_int_code,
 
        if (test_bit(ec_call_function_single, &bits))
                generic_smp_call_function_single_interrupt();
+
 }
 
 /*