/*
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 */
#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>
u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
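
/*
 * Worked example (editor's illustration, not from the original source):
 * with CONFIG_BASE_SMALL=0 the wheel has one root level of
 * TVR_SIZE = 1 << 8 = 256 slots plus four levels of TVN_SIZE = 1 << 6 = 64
 * slots each, so it spans TVR_BITS + 4*TVN_BITS = 8 + 24 = 32 bits and
 * MAX_TVAL = 2^32 - 1 jiffies. With CONFIG_BASE_SMALL=1 the span shrinks
 * to 6 + 16 = 22 bits, i.e. MAX_TVAL = 2^22 - 1.
 */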
struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;
struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base = TBASE_MAKE_DEFERRED(timer->base);
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	timer->base = (struct tvec_base *)((unsigned long)(new_base) |
				      tbase_get_deferrable(timer->base));
}
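
/*
 * Editor's note (illustrative): struct tvec_base is allocated with at
 * least 2-byte alignment, so bit 0 of timer->base is always zero and can
 * carry TBASE_DEFERRABLE_FLAG. A base at, say, 0x...2000 stored for a
 * deferrable timer reads back as 0x...2001; tbase_get_base() masks the
 * flag off again before the pointer is ever dereferenced.
 */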
static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}
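
/*
 * Worked example (editor's addition): with HZ=1000, cpu=2 and
 * j = 12*HZ + 40 = 12040, the skew makes j = 12046, rem = 46 < HZ/4,
 * so we round down to 12000 and un-skew to 11994 -- six jiffies shy of
 * the whole second, keeping CPU 2 off the shared wakeup instant while
 * still batching its timers near the second boundary.
 */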
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);
/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);
/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);
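
/*
 * Illustrative usage (editor's sketch; "my_timer" is a hypothetical
 * timer): a periodic housekeeping job that only needs roughly
 * five-second granularity can batch its wakeups onto whole seconds:
 *
 *	mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
 */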
/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);
/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);
/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);
/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
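
/*
 * Illustrative usage (editor's sketch; "my_timer" is hypothetical):
 * a timer that nominally fires every 100 jiffies but may be coalesced
 * up to 10 jiffies late:
 *
 *	set_timer_slack(&my_timer, 10);
 *	mod_timer(&my_timer, jiffies + 100);
 */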
static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
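
/*
 * Worked example (editor's addition): with base->timer_jiffies = 1000
 * and CONFIG_BASE_SMALL=0, a timer with expires = 1003 has idx = 3 <
 * TVR_SIZE and lands in tv1 slot 1003 & 255 = 235; one with expires =
 * 1300 has idx = 300, so it goes to tv2 slot (1300 >> 8) & 63 = 5 and
 * is cascaded back into tv1 once timer_jiffies reaches 1280.
 */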
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	__internal_add_timer(base, timer);
	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!tbase_get_deferrable(timer->base)) {
		if (time_before(timer->expires, base->next_timer))
			base->next_timer = timer->expires;
		base->active_timers++;
	}
}
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (timer->entry.prev == TIMER_ENTRY_STATIC) {
			/*
			 * This is not really a fixup. The timer was
			 * statically initialized. We just make sure that it
			 * is tracked in the object tracker.
			 */
			debug_object_init(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
			     const char *name,
			     struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);
#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}
static void __init_timer(struct timer_list *timer,
			 const char *name,
			 struct lock_class_key *key)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
					 const char *name,
					 struct lock_class_key *key,
					 void (*function)(unsigned long),
					 unsigned long data)
{
	timer->function = function;
	timer->data = data;
	init_timer_on_stack_key(timer, name, key);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key);
/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
		    const char *name,
		    struct lock_class_key *key)
{
	debug_init(timer);
	__init_timer(timer, name, key);
}
EXPORT_SYMBOL(init_timer_key);

void init_timer_deferrable_key(struct timer_list *timer,
			       const char *name,
			       struct lock_class_key *key)
{
	init_timer_key(timer, name, key);
	timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable_key);
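
/*
 * Illustrative lifecycle (editor's sketch; "my_timer", "my_handler" and
 * the 2*HZ period are hypothetical):
 *
 *	static struct timer_list my_timer;
 *
 *	static void my_handler(unsigned long data)
 *	{
 *		... do work, then re-arm for another period ...
 *		mod_timer(&my_timer, jiffies + 2 * HZ);
 *	}
 *
 *	init_timer(&my_timer);
 *	my_timer.function = my_handler;
 *	my_timer.data = 0;
 *	mod_timer(&my_timer, jiffies + 2 * HZ);
 *
 * Teardown should use del_timer_sync() so the handler cannot still be
 * running when the timer's memory goes away.
 */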
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
	detach_timer(timer, true);
	if (!tbase_get_deferrable(timer->base))
		timer->base->active_timers--;
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
			     bool clear_pending)
{
	if (!timer_pending(timer))
		return 0;

	detach_timer(timer, clear_pending);
	if (!tbase_get_deferrable(timer->base)) {
		timer->base->active_timers--;
		if (timer->expires == base->next_timer)
			base->next_timer = base->timer_jiffies;
	}
	return 1;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
		cpu = get_nohz_timer_target();
#endif
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not finished yet. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);
/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = find_last_bit(&mask, BITS_PER_LONG);

	mask = (1 << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}
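
/*
 * Worked example (editor's addition): with slack = -1, jiffies = 999000
 * and expires = 1000000, delta = 1000 so expires_limit = 1000000 +
 * 1000/256 = 1000003. expires ^ expires_limit = 3, the highest differing
 * bit is bit 1, mask = (1 << 1) - 1 = 1, and 1000003 & ~1 = 1000002.
 * Zeroing the low bits makes nearby timers collapse onto shared expiry
 * values, so their wakeups can be batched.
 */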
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline. If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);
#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * Note: You must not hold locks that are held in interrupt context
 *   while calling this function. Even if the lock has nothing to do
 *   with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                     call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *      while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq());
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		/* No accounting, while moving them */
		__internal_add_timer(base, timer);
	}

	return index;
}
static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (preempt_count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, preempt_count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count() = preempt_count;
	}
}
#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_expired_timer(timer, base);

			spin_unlock_irq(&base->lock);
			call_timer_fn(timer, fn, data);
			spin_lock_irq(&base->lock);
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}
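
/*
 * Worked example (editor's addition): INDEX(N) extracts the slot of
 * level N+2 that base->timer_jiffies has just reached. When
 * timer_jiffies hits 1280 (= 5 * 256 with CONFIG_BASE_SMALL=0) the tv1
 * index wraps to 0, so tv2 slot INDEX(0) = (1280 >> 8) & 63 = 5 is
 * emptied and its timers re-hashed into tv1; had that slot index also
 * been 0, the cascade would ripple on into tv3, and so forth.
 */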
#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				if (tbase_get_deferrable(nte->base))
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}
/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq.
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}
/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);
	unsigned long expires = now + NEXT_TIMER_MAX_DELTA;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	spin_lock(&base->lock);
	if (base->active_timers) {
		if (time_before_eq(base->next_timer, base->timer_jiffies))
			base->next_timer = __next_timer_interrupt(base);
		expires = base->next_timer;
	}
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(cpu, user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_run();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif
/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}
/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}
SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}
static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}
/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
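
/*
 * Illustrative usage (editor's sketch): an interruptible wait of about
 * one second that distinguishes timeout from early wakeup:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *	if (remaining)
 *		... a signal woke us with 'remaining' jiffies to go ...
 */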
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}
/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_ts(&tp);
	monotonic_to_bootbased(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels. If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1. This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}
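
/*
 * Worked example (editor's addition): if the memory counters are
 * reported in pages and info->mem_unit = 4096, the loop above yields
 * bitcount = 12; provided the repeated doubling of mem_total never
 * overflowed, each field is then shifted left by 12 so the struct
 * reports plain bytes with mem_unit = 1, which is what 2.2.x-era
 * userspace expects.
 */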
SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
static int __cpuinit init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char __cpuinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
					    GFP_KERNEL | __GFP_ZERO,
					    cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
	base->active_timers = 0;
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		/* We ignore the accounting on the dying cpu */
		detach_timer(timer, false);
		timer_set_base(timer, new_base);
		internal_add_timer(new_base, timer);
	}
}

static void __cpuinit migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = init_timers_cpu(cpu);
		if (err < 0)
			return notifier_from_errno(err);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				   (void *)(long)smp_processor_id());

	init_timer_stats();

	BUG_ON(err != NOTIFY_OK);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);
static int __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	unsigned long delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (max - min) * NSEC_PER_USEC;
	return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
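
/*
 * Editor's note (guidance, not original text): in process context,
 * sub-millisecond waits are best expressed as a range so the hrtimer
 * subsystem can coalesce wakeups, while longer waits can use msleep():
 *
 *	usleep_range(100, 200);		sleep roughly 100-200 us
 *	msleep(20);			sleep at least ~20 ms
 */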