/*
 *	linux/kernel/softirq.c
 *
 *	Copyright (C) 1992 Linus Torvalds
 *
 *	Distribute under GPLv2.
 *
 *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 *
 *	Remote softirq infrastructure is by Jens Axboe.
 */
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/tick.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear whether this results in better
     locality or not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */
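
/*
 * A minimal illustrative sketch (not part of this file): a softirq handler
 * that follows the rule above and serializes itself with its own spinlock.
 * The names example_lock, example_list and example_softirq_action are
 * hypothetical.
 */
#if 0
static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

static void example_softirq_action(struct softirq_action *h)
{
        /*
         * May run concurrently on several CPUs; any serialization needed
         * is the handler's own business.
         */
        spin_lock(&example_lock);
        while (!list_empty(&example_list)) {
                struct list_head *entry = example_list.next;

                list_del(entry);
                /* ... process entry ... */
        }
        spin_unlock(&example_lock);
}
#endif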

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

char *softirq_to_name[NR_SOFTIRQS] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
        "TASKLET", "SCHED", "HRTIMER", "RCU"
};

/*
 * We cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so let the scheduler balance the softirq
 * load for us.
 */
void wakeup_softirqd(void)
{
        /* Interrupts are disabled: no need to stop preemption */
        struct task_struct *tsk = __get_cpu_var(ksoftirqd);

        if (tsk && tsk->state != TASK_RUNNING)
                wake_up_process(tsk);
}

/*
 * preempt_count and SOFTIRQ_OFFSET usage:
 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
 *   softirq processing.
 * - preempt_count is changed by SOFTIRQ_DISABLE_OFFSET (= 2 * SOFTIRQ_OFFSET)
 *   on local_bh_disable or local_bh_enable.
 * This lets us distinguish between whether we are currently processing
 * softirq and whether we just have bh disabled.
 */
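
/*
 * Illustrative sketch (hypothetical helper, not part of this file): with two
 * distinct offsets, code can tell "a softirq handler is running" apart from
 * "bottom halves are merely disabled".
 */
#if 0
static void example_report_context(void)
{
        if (in_serving_softirq())
                pr_info("executing a softirq handler\n");       /* SOFTIRQ_OFFSET */
        else if (in_softirq())
                pr_info("softirqs disabled via local_bh_disable\n");
}
#endif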

/*
 * This one is for softirq.c-internal use,
 * where hardirqs are disabled legitimately:
 */
#ifdef CONFIG_TRACE_IRQFLAGS
static void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        unsigned long flags;

        WARN_ON_ONCE(in_irq());

        raw_local_irq_save(flags);
        /*
         * The preempt tracer hooks into add_preempt_count and will break
         * lockdep because it calls back into lockdep after SOFTIRQ_OFFSET
         * is set and before current->softirq_enabled is cleared.
         * We must manually increment preempt_count here and manually
         * call the trace_preempt_off later.
         */
        preempt_count() += cnt;
        /*
         * Were softirqs turned off above:
         */
        if (softirq_count() == cnt)
                trace_softirqs_off(ip);
        raw_local_irq_restore(flags);

        if (preempt_count() == cnt)
                trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1));
}
#else /* !CONFIG_TRACE_IRQFLAGS */
static inline void __local_bh_disable(unsigned long ip, unsigned int cnt)
{
        add_preempt_count(cnt);
        barrier();
}
#endif /* CONFIG_TRACE_IRQFLAGS */

void local_bh_disable(void)
{
        __local_bh_disable((unsigned long)__builtin_return_address(0),
                           SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(local_bh_disable);

static void __local_bh_enable(unsigned int cnt)
{
        WARN_ON_ONCE(in_irq());
        WARN_ON_ONCE(!irqs_disabled());

        if (softirq_count() == cnt)
                trace_softirqs_on((unsigned long)__builtin_return_address(0));
        sub_preempt_count(cnt);
}

/*
 * Special-case - softirqs can safely be enabled in
 * cond_resched_softirq(), or by __do_softirq(),
 * without processing still-pending softirqs:
 */
void _local_bh_enable(void)
{
        __local_bh_enable(SOFTIRQ_DISABLE_OFFSET);
}

EXPORT_SYMBOL(_local_bh_enable);

static inline void _local_bh_enable_ip(unsigned long ip)
{
        WARN_ON_ONCE(in_irq() || irqs_disabled());
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_disable();
#endif
        /*
         * Are softirqs going to be turned on now:
         */
        if (softirq_count() == SOFTIRQ_DISABLE_OFFSET)
                trace_softirqs_on(ip);
        /*
         * Keep preemption disabled until we are done with
         * softirq processing:
         */
        sub_preempt_count(SOFTIRQ_DISABLE_OFFSET - 1);

        if (unlikely(!in_interrupt() && local_softirq_pending()))
                do_softirq();

        dec_preempt_count();
#ifdef CONFIG_TRACE_IRQFLAGS
        local_irq_enable();
#endif
        preempt_check_resched();
}

void local_bh_enable(void)
{
        _local_bh_enable_ip((unsigned long)__builtin_return_address(0));
}
EXPORT_SYMBOL(local_bh_enable);

void local_bh_enable_ip(unsigned long ip)
{
        _local_bh_enable_ip(ip);
}
EXPORT_SYMBOL(local_bh_enable_ip);
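
/*
 * Illustrative sketch (hypothetical names, not part of this file): process
 * context using local_bh_disable()/local_bh_enable() so that a softirq
 * handler on this CPU cannot run while per-CPU data is updated.
 */
#if 0
static DEFINE_PER_CPU(unsigned long, example_counter);

static void example_update_from_process_context(void)
{
        local_bh_disable();                     /* softirqs held off on this CPU */
        __get_cpu_var(example_counter)++;
        local_bh_enable();                      /* runs any pending softirqs */
}
#endif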

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance are latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
#define MAX_SOFTIRQ_RESTART 10

asmlinkage void __do_softirq(void)
{
        struct softirq_action *h;
        __u32 pending;
        int max_restart = MAX_SOFTIRQ_RESTART;
        int cpu;

        pending = local_softirq_pending();
        account_system_vtime(current);

        __local_bh_disable((unsigned long)__builtin_return_address(0),
                           SOFTIRQ_OFFSET);
        lockdep_softirq_enter();

        cpu = smp_processor_id();
restart:
        /* Reset the pending bitmask before enabling irqs */
        set_softirq_pending(0);

        local_irq_enable();

        h = softirq_vec;

        do {
                if (pending & 1) {
                        int prev_count = preempt_count();
                        kstat_incr_softirqs_this_cpu(h - softirq_vec);

                        trace_softirq_entry(h, softirq_vec);
                        h->action(h);
                        trace_softirq_exit(h, softirq_vec);
                        if (unlikely(prev_count != preempt_count())) {
                                printk(KERN_ERR "huh, entered softirq %td %s %p "
                                       "with preempt_count %08x, "
                                       "exited with %08x?\n", h - softirq_vec,
                                       softirq_to_name[h - softirq_vec],
                                       h->action, prev_count, preempt_count());
                                preempt_count() = prev_count;
                        }

                        rcu_bh_qs(cpu);
                }
                h++;
                pending >>= 1;
        } while (pending);

        local_irq_disable();

        pending = local_softirq_pending();
        if (pending && --max_restart)
                goto restart;

        if (pending)
                wakeup_softirqd();

        lockdep_softirq_exit();

        account_system_vtime(current);
        __local_bh_enable(SOFTIRQ_OFFSET);
}

#ifndef __ARCH_HAS_DO_SOFTIRQ

asmlinkage void do_softirq(void)
{
        __u32 pending;
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        pending = local_softirq_pending();

        if (pending)
                __do_softirq();

        local_irq_restore(flags);
}

#endif

/*
 * Enter an interrupt context.
 */
void irq_enter(void)
{
        int cpu = smp_processor_id();

        rcu_irq_enter();
        if (idle_cpu(cpu) && !in_interrupt()) {
                __irq_enter();
                tick_check_idle(cpu);
        } else
                __irq_enter();
}

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()       __do_softirq()
#else
# define invoke_softirq()       do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
        account_system_vtime(current);
        trace_hardirq_exit();
        sub_preempt_count(IRQ_EXIT_OFFSET);
        if (!in_interrupt() && local_softirq_pending())
                invoke_softirq();

        rcu_irq_exit();
#ifdef CONFIG_NO_HZ
        /* Make sure that timer wheel updates are propagated */
        if (idle_cpu(smp_processor_id()) && !in_interrupt() && !need_resched())
                tick_nohz_stop_sched_tick(0);
#endif
        preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
inline void raise_softirq_irqoff(unsigned int nr)
{
        __raise_softirq_irqoff(nr);

        /*
         * If we're in an interrupt or softirq, we're done
         * (this also catches softirq-disabled code). We will
         * actually run the softirq once we return from
         * the irq or softirq.
         *
         * Otherwise we wake up ksoftirqd to make sure we
         * schedule the softirq soon.
         */
        if (!in_interrupt())
                wakeup_softirqd();
}

void raise_softirq(unsigned int nr)
{
        unsigned long flags;

        local_irq_save(flags);
        raise_softirq_irqoff(nr);
        local_irq_restore(flags);
}

void open_softirq(int nr, void (*action)(struct softirq_action *))
{
        softirq_vec[nr].action = action;
}
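
/*
 * Illustrative sketch (hypothetical names, not part of this file): how a
 * subsystem wires up a softirq handler at init time and raises it from its
 * interrupt handler.  EXAMPLE_SOFTIRQ is hypothetical; a real softirq number
 * has to be added to the enum in <linux/interrupt.h>, which is why most code
 * uses tasklets instead.
 */
#if 0
static void example_softirq_handler(struct softirq_action *h)
{
        /* runs in softirq context with interrupts enabled */
}

static int __init example_subsys_init(void)
{
        open_softirq(EXAMPLE_SOFTIRQ, example_softirq_handler);
        return 0;
}

static irqreturn_t example_irq_handler(int irq, void *dev_id)
{
        raise_softirq(EXAMPLE_SOFTIRQ);         /* defer the heavy lifting */
        return IRQ_HANDLED;
}
#endif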

/*
 * Tasklets
 */
struct tasklet_head
{
        struct tasklet_struct *head;
        struct tasklet_struct **tail;
};

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);

void __tasklet_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_vec).tail = t;
        __get_cpu_var(tasklet_vec).tail = &(t->next);
        raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void __tasklet_hi_schedule(struct tasklet_struct *t)
{
        unsigned long flags;

        local_irq_save(flags);
        t->next = NULL;
        *__get_cpu_var(tasklet_hi_vec).tail = t;
        __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
        raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

void __tasklet_hi_schedule_first(struct tasklet_struct *t)
{
        BUG_ON(!irqs_disabled());

        t->next = __get_cpu_var(tasklet_hi_vec).head;
        __get_cpu_var(tasklet_hi_vec).head = t;
        __raise_softirq_irqoff(HI_SOFTIRQ);
}

EXPORT_SYMBOL(__tasklet_hi_schedule_first);

static void tasklet_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_vec).head;
        __get_cpu_var(tasklet_vec).head = NULL;
        __get_cpu_var(tasklet_vec).tail = &__get_cpu_var(tasklet_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_vec).tail = t;
                __get_cpu_var(tasklet_vec).tail = &(t->next);
                __raise_softirq_irqoff(TASKLET_SOFTIRQ);
                local_irq_enable();
        }
}

static void tasklet_hi_action(struct softirq_action *a)
{
        struct tasklet_struct *list;

        local_irq_disable();
        list = __get_cpu_var(tasklet_hi_vec).head;
        __get_cpu_var(tasklet_hi_vec).head = NULL;
        __get_cpu_var(tasklet_hi_vec).tail = &__get_cpu_var(tasklet_hi_vec).head;
        local_irq_enable();

        while (list) {
                struct tasklet_struct *t = list;

                list = list->next;

                if (tasklet_trylock(t)) {
                        if (!atomic_read(&t->count)) {
                                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                                        BUG();
                                t->func(t->data);
                                tasklet_unlock(t);
                                continue;
                        }
                        tasklet_unlock(t);
                }

                local_irq_disable();
                t->next = NULL;
                *__get_cpu_var(tasklet_hi_vec).tail = t;
                __get_cpu_var(tasklet_hi_vec).tail = &(t->next);
                __raise_softirq_irqoff(HI_SOFTIRQ);
                local_irq_enable();
        }
}

void tasklet_init(struct tasklet_struct *t,
                  void (*func)(unsigned long), unsigned long data)
{
        t->next = NULL;
        t->state = 0;
        atomic_set(&t->count, 0);
        t->func = func;
        t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
        if (in_interrupt())
                printk("Attempt to kill tasklet from interrupt\n");

        while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
                do {
                        yield();
                } while (test_bit(TASKLET_STATE_SCHED, &t->state));
        }
        tasklet_unlock_wait(t);
        clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);
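
/*
 * Illustrative sketch (hypothetical driver, not part of this file): the usual
 * tasklet life cycle - initialize once, schedule from the interrupt handler,
 * kill before the driver goes away.  struct example_dev and the example_*
 * functions are hypothetical.
 */
#if 0
static void example_tasklet_func(unsigned long data)
{
        struct example_dev *dev = (struct example_dev *)data;

        /* bottom-half work for dev, runs in softirq context */
}

static irqreturn_t example_isr(int irq, void *dev_id)
{
        struct example_dev *dev = dev_id;

        tasklet_schedule(&dev->tasklet);        /* defer work out of hardirq */
        return IRQ_HANDLED;
}

static int example_probe(struct example_dev *dev)
{
        tasklet_init(&dev->tasklet, example_tasklet_func, (unsigned long)dev);
        return 0;
}

static void example_remove(struct example_dev *dev)
{
        tasklet_kill(&dev->tasklet);            /* wait for a running instance */
}
#endif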

/*
 * tasklet_hrtimer
 */

/*
 * The trampoline is called when the hrtimer expires. If this is
 * called from the hrtimer interrupt then we schedule the tasklet as
 * the timer callback function expects to run in softirq context. If
 * it's called in softirq context anyway (i.e. high resolution timers
 * disabled) then the hrtimer callback is called right away.
 */
static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);

        if (hrtimer_is_hres_active(timer)) {
                tasklet_hi_schedule(&ttimer->tasklet);
                return HRTIMER_NORESTART;
        }
        return ttimer->function(timer);
}

/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
        struct tasklet_hrtimer *ttimer = (void *)data;
        enum hrtimer_restart restart;

        restart = ttimer->function(&ttimer->timer);
        if (restart != HRTIMER_NORESTART)
                hrtimer_restart(&ttimer->timer);
}

/**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer:      tasklet_hrtimer which is initialized
 * @function:    hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode:        hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                          enum hrtimer_restart (*function)(struct hrtimer *),
                          clockid_t which_clock, enum hrtimer_mode mode)
{
        hrtimer_init(&ttimer->timer, which_clock, mode);
        ttimer->timer.function = __hrtimer_tasklet_trampoline;
        tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline,
                     (unsigned long)ttimer);
        ttimer->function = function;
}
EXPORT_SYMBOL_GPL(tasklet_hrtimer_init);
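
/*
 * Illustrative sketch (hypothetical names, not part of this file): a callback
 * that must run in softirq context, re-armed every 10ms via a tasklet_hrtimer.
 */
#if 0
static struct tasklet_hrtimer example_thr;

static enum hrtimer_restart example_thr_cb(struct hrtimer *timer)
{
        /* runs from the tasklet, i.e. in softirq context */
        hrtimer_forward_now(timer, ktime_set(0, 10 * NSEC_PER_MSEC));
        return HRTIMER_RESTART;
}

static void example_thr_start(void)
{
        tasklet_hrtimer_init(&example_thr, example_thr_cb,
                             CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        tasklet_hrtimer_start(&example_thr, ktime_set(0, 10 * NSEC_PER_MSEC),
                              HRTIMER_MODE_REL);
}
#endif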

/*
 * Remote softirq bits
 */

DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
EXPORT_PER_CPU_SYMBOL(softirq_work_list);

static void __local_trigger(struct call_single_data *cp, int softirq)
{
        struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);

        list_add_tail(&cp->list, head);

        /* Trigger the softirq only if the list was previously empty. */
        if (head->next == &cp->list)
                raise_softirq_irqoff(softirq);
}

#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
static void remote_softirq_receive(void *data)
{
        struct call_single_data *cp = data;
        unsigned long flags;
        int softirq;

        softirq = cp->priv;

        local_irq_save(flags);
        __local_trigger(cp, softirq);
        local_irq_restore(flags);
}

static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        if (cpu_online(cpu)) {
                cp->func = remote_softirq_receive;
                cp->info = cp;
                cp->flags = 0;
                cp->priv = softirq;

                __smp_call_function_single(cpu, cp, 0);
                return 0;
        }

        return 1;
}
#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        return 1;
}
#endif

/**
 * __send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @this_cpu: the currently executing cpu
 * @softirq: the softirq for the work
 *
 * Attempt to schedule softirq work on a remote cpu.  If this cannot be
 * done, the work is instead queued up on the local cpu.
 *
 * Interrupts must be disabled.
 */
void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
{
        if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
                __local_trigger(cp, softirq);
}
EXPORT_SYMBOL(__send_remote_softirq);

/**
 * send_remote_softirq - try to schedule softirq work on a remote cpu
 * @cp: private SMP call function data area
 * @cpu: the remote cpu
 * @softirq: the softirq for the work
 *
 * Like __send_remote_softirq except that disabling interrupts and
 * computing the current cpu is done for the caller.
 */
void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
{
        unsigned long flags;
        int this_cpu;

        local_irq_save(flags);
        this_cpu = smp_processor_id();
        __send_remote_softirq(cp, cpu, this_cpu, softirq);
        local_irq_restore(flags);
}
EXPORT_SYMBOL(send_remote_softirq);
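
/*
 * Illustrative sketch (hypothetical names, not part of this file): completing
 * work on the CPU that originally submitted it.  The submitter records its
 * CPU, the completion path queues the call_single_data on that CPU's
 * softirq_work_list, and the (hypothetical) EXAMPLE_SOFTIRQ handler drains
 * the per-CPU list.  The call_single_data must stay valid until the handler
 * has run.
 */
#if 0
struct example_req {
        struct call_single_data csd;    /* list member is used for queueing */
        int submit_cpu;                 /* CPU that issued the request */
};

static void example_complete(struct example_req *req)
{
        /* send_remote_softirq() disables interrupts itself */
        send_remote_softirq(&req->csd, req->submit_cpu, EXAMPLE_SOFTIRQ);
}

static void example_remote_softirq_action(struct softirq_action *h)
{
        struct list_head local;

        INIT_LIST_HEAD(&local);
        local_irq_disable();
        list_splice_init(&__get_cpu_var(softirq_work_list[EXAMPLE_SOFTIRQ]),
                         &local);
        local_irq_enable();

        while (!list_empty(&local)) {
                struct example_req *req;

                req = list_entry(local.next, struct example_req, csd.list);
                list_del(&req->csd.list);
                /* ... finish the request ... */
        }
}
#endif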

static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
                                               unsigned long action, void *hcpu)
{
        /*
         * If a CPU goes away, splice its entries to the current CPU
         * and trigger a run of the softirq
         */
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
                int cpu = (unsigned long) hcpu;
                int i;

                local_irq_disable();
                for (i = 0; i < NR_SOFTIRQS; i++) {
                        struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
                        struct list_head *local_head;

                        if (list_empty(head))
                                continue;

                        local_head = &__get_cpu_var(softirq_work_list[i]);
                        list_splice_init(head, local_head);
                        raise_softirq_irqoff(i);
                }
                local_irq_enable();
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
        .notifier_call = remote_softirq_cpu_notify,
};

void __init softirq_init(void)
{
        int cpu;

        for_each_possible_cpu(cpu) {
                int i;

                per_cpu(tasklet_vec, cpu).tail =
                        &per_cpu(tasklet_vec, cpu).head;
                per_cpu(tasklet_hi_vec, cpu).tail =
                        &per_cpu(tasklet_hi_vec, cpu).head;
                for (i = 0; i < NR_SOFTIRQS; i++)
                        INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
        }

        register_hotcpu_notifier(&remote_softirq_cpu_notifier);

        open_softirq(TASKLET_SOFTIRQ, tasklet_action);
        open_softirq(HI_SOFTIRQ, tasklet_hi_action);
}

static int ksoftirqd(void * __bind_cpu)
{
        set_current_state(TASK_INTERRUPTIBLE);

        while (!kthread_should_stop()) {
                preempt_disable();
                if (!local_softirq_pending()) {
                        preempt_enable_no_resched();
                        schedule();
                        preempt_disable();
                }

                __set_current_state(TASK_RUNNING);

                while (local_softirq_pending()) {
                        /* Preempt disable stops cpu going offline.
                           If already offline, we'll be on wrong CPU:
                           don't process */
                        if (cpu_is_offline((long)__bind_cpu))
                                goto wait_to_die;
                        do_softirq();
                        preempt_enable_no_resched();
                        cond_resched();
                        preempt_disable();
                        rcu_sched_qs((long)__bind_cpu);
                }
                preempt_enable();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;

wait_to_die:
        preempt_enable();
        /* Wait for kthread_stop */
        set_current_state(TASK_INTERRUPTIBLE);
        while (!kthread_should_stop()) {
                schedule();
                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
        struct tasklet_struct **i;

        BUG_ON(cpu_online(cpu));
        BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

        if (!test_bit(TASKLET_STATE_SCHED, &t->state))
                return;

        /* CPU is dead, so no lock needed. */
        for (i = &per_cpu(tasklet_vec, cpu).head; *i; i = &(*i)->next) {
                if (*i == t) {
                        *i = t->next;
                        /* If this was the tail element, move the tail ptr */
                        if (*i == NULL)
                                per_cpu(tasklet_vec, cpu).tail = i;
                        return;
                }
        }
        BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
        /* CPU is dead, so no lock needed. */
        local_irq_disable();

        /* Find end, append list for that CPU. */
        if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) {
                *(__get_cpu_var(tasklet_vec).tail) = per_cpu(tasklet_vec, cpu).head;
                __get_cpu_var(tasklet_vec).tail = per_cpu(tasklet_vec, cpu).tail;
                per_cpu(tasklet_vec, cpu).head = NULL;
                per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head;
        }
        raise_softirq_irqoff(TASKLET_SOFTIRQ);

        if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) {
                *__get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).head;
                __get_cpu_var(tasklet_hi_vec).tail = per_cpu(tasklet_hi_vec, cpu).tail;
                per_cpu(tasklet_hi_vec, cpu).head = NULL;
                per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head;
        }
        raise_softirq_irqoff(HI_SOFTIRQ);

        local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit cpu_callback(struct notifier_block *nfb,
                                  unsigned long action,
                                  void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        struct task_struct *p;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
                if (IS_ERR(p)) {
                        printk("ksoftirqd for %i failed\n", hotcpu);
                        return notifier_from_errno(PTR_ERR(p));
                }
                kthread_bind(p, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = p;
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                wake_up_process(per_cpu(ksoftirqd, hotcpu));
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                if (!per_cpu(ksoftirqd, hotcpu))
                        break;
                /* Unbind so it can run.  Fall thru. */
                kthread_bind(per_cpu(ksoftirqd, hotcpu),
                             cpumask_any(cpu_online_mask));
        case CPU_DEAD:
        case CPU_DEAD_FROZEN: {
                struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

                p = per_cpu(ksoftirqd, hotcpu);
                per_cpu(ksoftirqd, hotcpu) = NULL;
                sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
                kthread_stop(p);
                takeover_tasklets(hotcpu);
                break;
        }
#endif /* CONFIG_HOTPLUG_CPU */
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

static __init int spawn_ksoftirqd(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);

        BUG_ON(err == NOTIFY_BAD);
        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);
        return 0;
}
early_initcall(spawn_ksoftirqd);

#ifdef CONFIG_SMP
/*
 * Call a function on all processors
 */
int on_each_cpu(void (*func) (void *info), void *info, int wait)
{
        int ret = 0;

        preempt_disable();
        ret = smp_call_function(func, info, wait);
        local_irq_disable();
        func(info);
        local_irq_enable();
        preempt_enable();
        return ret;
}
EXPORT_SYMBOL(on_each_cpu);
#endif
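
/*
 * Illustrative sketch (hypothetical names, not part of this file): running a
 * short function on every online CPU, e.g. to flush a per-CPU cache.
 */
#if 0
static void example_flush_local(void *info)
{
        /* runs on each CPU, with interrupts disabled */
}

static void example_flush_all(void)
{
        on_each_cpu(example_flush_local, NULL, 1);      /* 1 == wait for all CPUs */
}
#endif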

/*
 * [ These __weak aliases are kept in a separate compilation unit, so that
 *   GCC does not inline them incorrectly. ]
 */

int __init __weak early_irq_init(void)
{
        return 0;
}

int __init __weak arch_probe_nr_irqs(void)
{
        return 0;
}

int __init __weak arch_early_irq_init(void)
{
        return 0;
}

int __weak arch_init_chip_data(struct irq_desc *desc, int node)
{
        return 0;
}