/*
 *  linux/kernel/time/timekeeping.c
 *
 *  Kernel timekeeping code and accessor functions
 *
 *  This code was moved from linux/kernel/timer.c.
 *  Please see that file for copyright and history logs.
 */
#include <linux/timekeeper_internal.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
#include <linux/stop_machine.h>
#include <linux/pvclock_gtod.h>
static struct timekeeper timekeeper;

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;
static inline void tk_normalize_xtime(struct timekeeper *tk)
{
	while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
		tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift;
		tk->xtime_sec++;
	}
}
static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec = ts->tv_sec;
	tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift;
}

static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts)
{
	tk->xtime_sec += ts->tv_sec;
	tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift;
	tk_normalize_xtime(tk);
}
static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm)
{
	struct timespec tmp;

	/*
	 * Verify consistency of: offset_real = -wall_to_monotonic
	 * before modifying anything
	 */
	set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec,
					-tk->wall_to_monotonic.tv_nsec);
	WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64);
	tk->wall_to_monotonic = wtm;
	set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec);
	tk->offs_real = timespec_to_ktime(tmp);
}
static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t)
{
	/* Verify consistency before modifying */
	WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64);

	tk->total_sleep_time = t;
	tk->offs_boot = timespec_to_ktime(t);
}
/**
 * tk_setup_internals - Set up internals to use clocksource clock.
 *
 * @clock:	Pointer to clocksource.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
{
	cycle_t interval;
	u64 tmp, ntpinterval;
	struct clocksource *old_clock;

	old_clock = tk->clock;
	tk->clock = clock;
	clock->cycle_last = clock->read(clock);

	/* Do the ns -> cycle conversion first, using original mult */
	tmp = NTP_INTERVAL_LENGTH;
	tmp <<= clock->shift;
	ntpinterval = tmp;
	tmp += clock->mult/2;
	do_div(tmp, clock->mult);
	if (tmp == 0)
		tmp = 1;

	interval = (cycle_t) tmp;
	tk->cycle_interval = interval;

	/* Go back from cycles -> shifted ns */
	tk->xtime_interval = (u64) interval * clock->mult;
	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
	tk->raw_interval =
		((u64) interval * clock->mult) >> clock->shift;

	/* if changing clocks, convert xtime_nsec shift units */
	if (old_clock) {
		int shift_change = clock->shift - old_clock->shift;
		if (shift_change < 0)
			tk->xtime_nsec >>= -shift_change;
		else
			tk->xtime_nsec <<= shift_change;
	}
	tk->shift = clock->shift;

	tk->ntp_error = 0;
	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;

	/*
	 * The timekeeper keeps its own mult values for the currently
	 * active clocksource. These values will be adjusted via NTP
	 * to counteract clock drifting.
	 */
	tk->mult = clock->mult;
}
/* Timekeeper helper functions. */

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET

u32 (*arch_gettimeoffset)(void);

u32 get_arch_timeoffset(void)
{
	if (likely(arch_gettimeoffset))
		return arch_gettimeoffset();
	return 0;
}
#else
static inline u32 get_arch_timeoffset(void) { return 0; }
#endif
static inline s64 timekeeping_get_ns(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	nsec = cycle_delta * tk->mult + tk->xtime_nsec;
	nsec >>= tk->shift;

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + get_arch_timeoffset();
}
static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	/* read clocksource: */
	clock = tk->clock;
	cycle_now = clock->read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert delta to nanoseconds. */
	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);

	/* If arch requires, add in get_arch_timeoffset() */
	return nsec + get_arch_timeoffset();
}
static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);

static void update_pvclock_gtod(struct timekeeper *tk)
{
	raw_notifier_call_chain(&pvclock_gtod_chain, 0, tk);
}
/**
 * pvclock_gtod_register_notifier - register a pvclock timedata update listener
 *
 * Must hold write on timekeeper.lock
 */
int pvclock_gtod_register_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	int ret;

	write_seqlock_irqsave(&tk->lock, flags);
	ret = raw_notifier_chain_register(&pvclock_gtod_chain, nb);
	/* update timekeeping data */
	update_pvclock_gtod(tk);
	write_sequnlock_irqrestore(&tk->lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_register_notifier);
/**
 * pvclock_gtod_unregister_notifier - unregister a pvclock
 * timedata update listener
 *
 * Must hold write on timekeeper.lock
 */
int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	int ret;

	write_seqlock_irqsave(&tk->lock, flags);
	ret = raw_notifier_chain_unregister(&pvclock_gtod_chain, nb);
	write_sequnlock_irqrestore(&tk->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
/* must hold write on timekeeper.lock */
static void timekeeping_update(struct timekeeper *tk, bool clearntp)
{
	if (clearntp) {
		tk->ntp_error = 0;
		ntp_clear();
	}
	update_vsyscall(tk);
	update_pvclock_gtod(tk);
}
/**
 * timekeeping_forward_now - update clock to the current time
 *
 * Forward the current clock to update its state since the last call to
 * update_wall_time(). This is useful before significant clock changes,
 * as it avoids having to deal with this time offset explicitly.
 */
static void timekeeping_forward_now(struct timekeeper *tk)
{
	cycle_t cycle_now, cycle_delta;
	struct clocksource *clock;
	s64 nsec;

	clock = tk->clock;
	cycle_now = clock->read(clock);
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
	clock->cycle_last = cycle_now;

	tk->xtime_nsec += cycle_delta * tk->mult;

	/* If arch requires, add in get_arch_timeoffset() */
	tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift;

	tk_normalize_xtime(tk);

	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
	timespec_add_ns(&tk->raw_time, nsec);
}
/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs = 0;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);
		ts->tv_sec = tk->xtime_sec;
		nsecs = timekeeping_get_ns(tk);
	} while (read_seqretry(&tk->lock, seq));

	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);
ktime_t ktime_get(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned int seq;
	s64 secs, nsecs;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);
		secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
		nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
	} while (read_seqretry(&tk->lock, seq));
	/*
	 * Use ktime_set/ktime_add_ns to create a proper ktime on
	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
	 */
	return ktime_add_ns(ktime_set(secs, 0), nsecs);
}
EXPORT_SYMBOL_GPL(ktime_get);
/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts:		pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec tomono;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(tk);
		tomono = tk->wall_to_monotonic;
	} while (read_seqretry(&tk->lock, seq));

	ts->tv_sec += tomono.tv_sec;
	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
#ifdef CONFIG_NTP_PPS

/**
 * getnstime_raw_and_real - get day and raw monotonic time in timespec format
 * @ts_raw:	pointer to the timespec to be set to raw monotonic time
 * @ts_real:	pointer to the timespec to be set to the time of day
 *
 * This function reads both the time of day and raw monotonic time at the
 * same time atomically and stores the resulting timestamps in timespec
 * format.
 */
void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs_raw, nsecs_real;

	WARN_ON_ONCE(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);

		*ts_raw = tk->raw_time;
		ts_real->tv_sec = tk->xtime_sec;
		ts_real->tv_nsec = 0;

		nsecs_raw = timekeeping_get_ns_raw(tk);
		nsecs_real = timekeeping_get_ns(tk);
	} while (read_seqretry(&tk->lock, seq));

	timespec_add_ns(ts_raw, nsecs_raw);
	timespec_add_ns(ts_real, nsecs_real);
}
EXPORT_SYMBOL(getnstime_raw_and_real);

#endif /* CONFIG_NTP_PPS */
/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(const struct timespec *tv)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec ts_delta, xt;
	unsigned long flags;

	if (!timespec_valid_strict(tv))
		return -EINVAL;

	write_seqlock_irqsave(&tk->lock, flags);
	timekeeping_forward_now(tk);

	xt = tk_xtime(tk);
	ts_delta.tv_sec = tv->tv_sec - xt.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec;

	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta));
	tk_set_xtime(tk, tv);

	timekeeping_update(tk, true);
	write_sequnlock_irqrestore(&tk->lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
/**
 * timekeeping_inject_offset - Adds or subtracts from the current time.
 * @ts:		pointer to the timespec variable containing the offset
 *
 * Adds or subtracts an offset value from the current time.
 */
int timekeeping_inject_offset(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec tmp;
	int ret = 0;

	if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&tk->lock, flags);
	timekeeping_forward_now(tk);

	/* Make sure the proposed value is valid */
	tmp = timespec_add(tk_xtime(tk), *ts);
	if (!timespec_valid_strict(&tmp)) {
		ret = -EINVAL;
		goto error;
	}

	tk_xtime_add(tk, ts);
	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts));

error: /* even if we error out, we forwarded the time, so call update */
	timekeeping_update(tk, true);
	write_sequnlock_irqrestore(&tk->lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return ret;
}
EXPORT_SYMBOL(timekeeping_inject_offset);
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void *data)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *new, *old;
	unsigned long flags;

	new = (struct clocksource *) data;

	write_seqlock_irqsave(&tk->lock, flags);
	timekeeping_forward_now(tk);
	if (!new->enable || new->enable(new) == 0) {
		old = tk->clock;
		tk_setup_internals(tk, new);
		if (old->disable)
			old->disable(old);
	}
	timekeeping_update(tk, true);
	write_sequnlock_irqrestore(&tk->lock, flags);

	return 0;
}
/**
 * timekeeping_notify - Install a new clock source
 * @clock:	pointer to the clock source
 *
 * This function is called from clocksource.c after a new, better clock
 * source has been registered. The caller holds the clocksource_mutex.
 */
void timekeeping_notify(struct clocksource *clock)
{
	struct timekeeper *tk = &timekeeper;

	if (tk->clock == clock)
		return;
	stop_machine(change_clocksource, clock, NULL);
	tick_clock_notify();
}
/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);
	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);
/**
 * getrawmonotonic - Returns the raw monotonic time in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the raw monotonic time (completely un-modified by ntp)
 */
void getrawmonotonic(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&tk->lock);
		nsecs = timekeeping_get_ns_raw(tk);
		*ts = tk->raw_time;
	} while (read_seqretry(&tk->lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getrawmonotonic);
/**
 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
 */
int timekeeping_valid_for_hres(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&tk->lock);
		ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
	} while (read_seqretry(&tk->lock, seq));

	return ret;
}
/**
 * timekeeping_max_deferment - Returns max time the clocksource can be deferred
 */
u64 timekeeping_max_deferment(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqbegin(&tk->lock);
		ret = tk->clock->max_idle_ns;
	} while (read_seqretry(&tk->lock, seq));

	return ret;
}
/**
 * read_persistent_clock -  Return time from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Reads the time from the battery backed persistent clock.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_persistent_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}
/**
 * read_boot_clock -  Return time of the system start.
 *
 * Weak dummy function for arches that do not yet support it.
 * Function to read the exact time the system has been started.
 * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
 *
 *  XXX - Do be sure to remove it once all arches implement it.
 */
void __attribute__((weak)) read_boot_clock(struct timespec *ts)
{
	ts->tv_sec = 0;
	ts->tv_nsec = 0;
}
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	struct timekeeper *tk = &timekeeper;
	struct clocksource *clock;
	unsigned long flags;
	struct timespec now, boot, tmp;

	read_persistent_clock(&now);
	if (!timespec_valid_strict(&now)) {
		pr_warn("WARNING: Persistent clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		now.tv_sec = 0;
		now.tv_nsec = 0;
	}

	read_boot_clock(&boot);
	if (!timespec_valid_strict(&boot)) {
		pr_warn("WARNING: Boot clock returned invalid value!\n"
			"         Check your CMOS/BIOS settings.\n");
		boot.tv_sec = 0;
		boot.tv_nsec = 0;
	}

	seqlock_init(&tk->lock);

	ntp_init();

	write_seqlock_irqsave(&tk->lock, flags);
	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	tk_setup_internals(tk, clock);

	tk_set_xtime(tk, &now);
	tk->raw_time.tv_sec = 0;
	tk->raw_time.tv_nsec = 0;
	if (boot.tv_sec == 0 && boot.tv_nsec == 0)
		boot = tk_xtime(tk);

	set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec);
	tk_set_wall_to_mono(tk, tmp);

	tmp.tv_sec = 0;
	tmp.tv_nsec = 0;
	tk_set_sleep_time(tk, tmp);

	write_sequnlock_irqrestore(&tk->lock, flags);
}
/* time in seconds when suspend began */
static struct timespec timekeeping_suspend_time;
/**
 * __timekeeping_inject_sleeptime - Internal function to add sleep interval
 * @delta: pointer to a timespec delta value
 *
 * Takes a timespec offset measuring a suspend interval and properly
 * adds the sleep offset to the timekeeping variables.
 */
static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
						struct timespec *delta)
{
	if (!timespec_valid_strict(delta)) {
		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
					"sleep delta value!\n");
		return;
	}
	tk_xtime_add(tk, delta);
	tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta));
	tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta));
}
/**
 * timekeeping_inject_sleeptime - Adds suspend interval to timekeeping values
 * @delta: pointer to a timespec delta value
 *
 * This hook is for architectures that cannot support read_persistent_clock
 * because their RTC/persistent clock is only accessible when irqs are enabled.
 *
 * This function should only be called by rtc_resume(), and allows
 * a suspend offset to be injected into the timekeeping values.
 */
void timekeeping_inject_sleeptime(struct timespec *delta)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec ts;

	/* Make sure we don't set the clock twice */
	read_persistent_clock(&ts);
	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
		return;

	write_seqlock_irqsave(&tk->lock, flags);
	timekeeping_forward_now(tk);

	__timekeeping_inject_sleeptime(tk, delta);

	timekeeping_update(tk, true);
	write_sequnlock_irqrestore(&tk->lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();
}
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static void timekeeping_resume(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec ts;

	read_persistent_clock(&ts);

	clockevents_resume();
	clocksource_resume();

	write_seqlock_irqsave(&tk->lock, flags);

	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
		ts = timespec_sub(ts, timekeeping_suspend_time);
		__timekeeping_inject_sleeptime(tk, &ts);
	}
	/* re-base the last cycle value */
	tk->clock->cycle_last = tk->clock->read(tk->clock);
	tk->ntp_error = 0;
	timekeeping_suspended = 0;
	timekeeping_update(tk, false);
	write_sequnlock_irqrestore(&tk->lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hrtimers_resume();
}
static int timekeeping_suspend(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long flags;
	struct timespec delta, delta_delta;
	static struct timespec old_delta;

	read_persistent_clock(&timekeeping_suspend_time);

	write_seqlock_irqsave(&tk->lock, flags);
	timekeeping_forward_now(tk);
	timekeeping_suspended = 1;

	/*
	 * To avoid drift caused by repeated suspend/resumes,
	 * which each can add ~1 second drift error,
	 * try to compensate so the difference in system time
	 * and persistent_clock time stays close to constant.
	 */
	delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time);
	delta_delta = timespec_sub(delta, old_delta);
	if (abs(delta_delta.tv_sec) >= 2) {
		/*
		 * if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
		 */
		old_delta = delta;
	} else {
		/* Otherwise try to adjust old_system to compensate */
		timekeeping_suspend_time =
			timespec_add(timekeeping_suspend_time, delta_delta);
	}
	write_sequnlock_irqrestore(&tk->lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
	clocksource_suspend();
	clockevents_suspend();

	return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct syscore_ops timekeeping_syscore_ops = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static int __init timekeeping_init_ops(void)
{
	register_syscore_ops(&timekeeping_syscore_ops);
	return 0;
}

device_initcall(timekeeping_init_ops);
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
						 s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
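	/*
	 * Illustrative sketch only (hypothetical numbers, not taken from the
	 * code below): if the scaled error2 starts at 5, the loop below shifts
	 * it right by two bits per pass (5 -> 1 -> 0), giving look_ahead = 2,
	 * i.e. the remaining error is then spread over 1 << 2 = 4 ticks.
	 */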
	error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
	tick_error -= tk->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}
/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
{
	s64 error, interval = tk->cycle_interval;
	int adj;

	/*
	 * The point of this is to check if the error is greater than half
	 * an interval.
	 *
	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
	 *
	 * Note we subtract one in the shift, so that error is really error*2.
	 * This "saves" dividing(shifting) interval twice, but keeps the
	 * (error > interval) comparison as still measuring if error is
	 * larger than half an interval.
	 *
	 * Note: It does not "save" on aggravation when reading the code.
	 */
	error = tk->ntp_error >> (tk->ntp_error_shift - 1);
	if (error > interval) {
		/*
		 * We now divide error by 4 (via shift), which checks if
		 * the error is greater than twice the interval.
		 * If it is greater, we need a bigadjust, if it's smaller,
		 * we can adjust by 1.
		 */
		error >>= 2;
		/*
		 * XXX - In update_wall_time, we round up to the next
		 * nanosecond, and store the amount rounded up into
		 * the error. This causes the likely below to be unlikely.
		 *
		 * The proper fix is to avoid rounding up by using
		 * the high precision tk->xtime_nsec instead of
		 * xtime.tv_nsec everywhere. Fixing this will take some
		 * time.
		 */
		if (likely(error <= interval))
			adj = 1;
		else
			adj = timekeeping_bigadjust(tk, error, &interval, &offset);
	} else {
		if (error < -interval) {
			/* See comment above, this is just switched for the negative */
			error >>= 2;
			if (likely(error >= -interval)) {
				adj = -1;
				interval = -interval;
				offset = -offset;
			} else {
				adj = timekeeping_bigadjust(tk, error, &interval, &offset);
			}
		} else {
			goto out_adjust;
		}
	}

	if (unlikely(tk->clock->maxadj &&
		(tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) {
		printk_once(KERN_WARNING
			"Adjusting %s more than 11%% (%ld vs %ld)\n",
			tk->clock->name, (long)tk->mult + adj,
			(long)tk->clock->mult + tk->clock->maxadj);
	}
	/*
	 * So the following can be confusing.
	 *
	 * To keep things simple, let's assume adj == 1 for now.
	 *
	 * When adj != 1, remember that the interval and offset values
	 * have been appropriately scaled so the math is the same.
	 *
	 * The basic idea here is that we're increasing the multiplier
	 * by one, this causes the xtime_interval to be incremented by
	 * one cycle_interval.  This is because:
	 *	xtime_interval = cycle_interval * mult
	 * So if mult is being incremented by one:
	 *	xtime_interval = cycle_interval * (mult + 1)
	 * It's the same as:
	 *	xtime_interval = (cycle_interval * mult) + cycle_interval
	 * Which can be shortened to:
	 *	xtime_interval += cycle_interval
	 *
	 * So offset stores the non-accumulated cycles. Thus the current
	 * time (in shifted nanoseconds) is:
	 *	now = (offset * adj) + xtime_nsec
	 * Now, even though we're adjusting the clock frequency, we have
	 * to keep time consistent. In other words, we can't jump back
	 * in time, and we also want to avoid jumping forward in time.
	 *
	 * So given the same offset value, we need the time to be the same
	 * both before and after the freq adjustment.
	 *	now = (offset * adj_1) + xtime_nsec_1
	 *	now = (offset * adj_2) + xtime_nsec_2
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_2) + xtime_nsec_2
	 * And we know:
	 *	adj_2 = adj_1 + 1
	 * So:
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * (adj_1+1)) + xtime_nsec_2
	 *	(offset * adj_1) + xtime_nsec_1 =
	 *		(offset * adj_1) + offset + xtime_nsec_2
	 * Canceling the sides:
	 *	xtime_nsec_1 = offset + xtime_nsec_2
	 * Which gives us:
	 *	xtime_nsec_2 = xtime_nsec_1 - offset
	 * Which simplifies to:
	 *	xtime_nsec -= offset
	 *
	 * XXX - TODO: Doc ntp_error calculation.
	 */
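	/*
	 * Worked example (illustrative numbers only): with offset = 3
	 * unaccumulated cycles, mult = 10 and xtime_nsec = 50, now is
	 * 3 * 10 + 50 = 80. After bumping mult by one to 11, keeping
	 * now = 80 requires xtime_nsec = 80 - 3 * 11 = 47, which is
	 * exactly xtime_nsec - offset, as applied below.
	 */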
	tk->mult += adj;
	tk->xtime_interval += interval;
	tk->xtime_nsec -= offset;
	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
out_adjust:
	/*
	 * It may be possible that when we entered this function, xtime_nsec
	 * was very small.  Further, if we're slightly speeding the clocksource
	 * in the code above, it's possible the required corrective factor to
	 * xtime_nsec could cause it to underflow.
	 *
	 * Now, since we already accumulated the second, we cannot simply roll
	 * the accumulated second back, since the NTP subsystem has been
	 * notified via second_overflow. So instead we push xtime_nsec forward
	 * by the amount we underflowed, and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)tk->xtime_nsec < 0)) {
		s64 neg = -(s64)tk->xtime_nsec;
		tk->xtime_nsec = 0;
		tk->ntp_error += neg << tk->ntp_error_shift;
	}
}
/**
 * accumulate_nsecs_to_secs - Accumulates nsecs into secs
 *
 * Helper function that accumulates the nsecs greater than a second
 * from the xtime_nsec field to the xtime_secs field.
 * It also calls into the NTP code to handle leapsecond processing.
 */
static inline void accumulate_nsecs_to_secs(struct timekeeper *tk)
{
	u64 nsecps = (u64)NSEC_PER_SEC << tk->shift;

	while (tk->xtime_nsec >= nsecps) {
		int leap;

		tk->xtime_nsec -= nsecps;
		tk->xtime_sec++;

		/* Figure out if it's a leap sec and apply if needed */
		leap = second_overflow(tk->xtime_sec);
		if (unlikely(leap)) {
			struct timespec ts;

			tk->xtime_sec += leap;
			ts.tv_sec = leap;
			ts.tv_nsec = 0;
			tk_set_wall_to_mono(tk,
				timespec_sub(tk->wall_to_monotonic, ts));

			clock_was_set_delayed();
		}
	}
}
/**
 * logarithmic_accumulation - shifted accumulation of cycles
 *
 * This function accumulates a shifted interval of cycles into
 * a shifted interval of nanoseconds, allowing for an O(log) accumulation
 * loop.
 *
 * Returns the unconsumed cycles.
 */
static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset,
						u32 shift)
{
	u64 raw_nsecs;

	/* If the offset is smaller than a shifted interval, do nothing */
	if (offset < tk->cycle_interval<<shift)
		return offset;

	/* Accumulate one shifted interval */
	offset -= tk->cycle_interval << shift;
	tk->clock->cycle_last += tk->cycle_interval << shift;

	tk->xtime_nsec += tk->xtime_interval << shift;
	accumulate_nsecs_to_secs(tk);

	/* Accumulate raw time */
	raw_nsecs = (u64)tk->raw_interval << shift;
	raw_nsecs += tk->raw_time.tv_nsec;
	if (raw_nsecs >= NSEC_PER_SEC) {
		u64 raw_secs = raw_nsecs;
		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
		tk->raw_time.tv_sec += raw_secs;
	}
	tk->raw_time.tv_nsec = raw_nsecs;

	/* Accumulate error between NTP and clock interval */
	tk->ntp_error += ntp_tick_length() << shift;
	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
						(tk->ntp_error_shift + shift);

	return offset;
}
#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
static inline void old_vsyscall_fixup(struct timekeeper *tk)
{
	s64 remainder;

	/*
	 * Store only full nanoseconds into xtime_nsec after rounding
	 * it up and add the remainder to the error difference.
	 * XXX - This is necessary to avoid small 1ns inconsistencies caused
	 * by truncating the remainder in vsyscalls. However, it causes
	 * additional work to be done in timekeeping_adjust(). Once
	 * the vsyscall implementations are converted to use xtime_nsec
	 * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
	 * users are removed, this can be killed.
	 */
	remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1);
	tk->xtime_nsec -= remainder;
	tk->xtime_nsec += 1ULL << tk->shift;
	tk->ntp_error += remainder << tk->ntp_error_shift;
}
#else
#define old_vsyscall_fixup(tk)
#endif
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 */
static void update_wall_time(void)
{
	struct clocksource *clock;
	struct timekeeper *tk = &timekeeper;
	cycle_t offset;
	int shift = 0, maxshift;
	unsigned long flags;

	write_seqlock_irqsave(&tk->lock, flags);

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		goto out;

	clock = tk->clock;
#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
	offset = tk->cycle_interval;
#else
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif

	/* Check if there's really nothing to do */
	if (offset < tk->cycle_interval)
		goto out;
	/*
	 * With NO_HZ we may have to accumulate many cycle_intervals
	 * (think "ticks") worth of time at once. To do this efficiently,
	 * we calculate the largest doubling multiple of cycle_intervals
	 * that is smaller than the offset.  We then accumulate that
	 * chunk in one go, and then try to consume the next smaller
	 * doubled multiple.
	 */
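	/*
	 * Illustrative example (hypothetical numbers): if offset is roughly
	 * 37 cycle_intervals, the ilog2() math below starts with shift = 5,
	 * so a chunk of 32 intervals is accumulated first, then 4 (shift 2),
	 * then 1 (shift 0).
	 */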
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* Bound shift to one less than what overflows tick_length */
	maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift);
		if (offset < tk->cycle_interval<<shift)
			shift--;
	}
	/* correct the clock when NTP error is too big */
	timekeeping_adjust(tk, offset);

	/*
	 * XXX This can be killed once everyone converts
	 * to the new update_vsyscall.
	 */
	old_vsyscall_fixup(tk);

	/*
	 * Finally, make sure that after the rounding
	 * xtime_nsec isn't larger than NSEC_PER_SEC
	 */
	accumulate_nsecs_to_secs(tk);

	timekeeping_update(tk, false);

out:
	write_sequnlock_irqrestore(&tk->lock, flags);
}
/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall-time of boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec boottime = {
		.tv_sec = tk->wall_to_monotonic.tv_sec +
				tk->total_sleep_time.tv_sec,
		.tv_nsec = tk->wall_to_monotonic.tv_nsec +
				tk->total_sleep_time.tv_nsec
	};

	set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
}
EXPORT_SYMBOL_GPL(getboottime);
/**
 * get_monotonic_boottime - Returns monotonic time since boot
 * @ts:		pointer to the timespec to be set
 *
 * Returns the monotonic time since boot in a timespec.
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also
 * includes the time spent in suspend.
 */
void get_monotonic_boottime(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec tomono, sleep;
	s64 nsec;
	unsigned int seq;

	WARN_ON(timekeeping_suspended);

	do {
		seq = read_seqbegin(&tk->lock);
		ts->tv_sec = tk->xtime_sec;
		nsec = timekeeping_get_ns(tk);
		tomono = tk->wall_to_monotonic;
		sleep = tk->total_sleep_time;
	} while (read_seqretry(&tk->lock, seq));

	ts->tv_sec += tomono.tv_sec + sleep.tv_sec;
	ts->tv_nsec = 0;
	timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec);
}
EXPORT_SYMBOL_GPL(get_monotonic_boottime);
/**
 * ktime_get_boottime - Returns monotonic time since boot in a ktime
 *
 * Returns the monotonic time since boot in a ktime
 *
 * This is similar to CLOCK_MONOTONIC/ktime_get, but also
 * includes the time spent in suspend.
 */
ktime_t ktime_get_boottime(void)
{
	struct timespec ts;

	get_monotonic_boottime(&ts);
	return timespec_to_ktime(ts);
}
EXPORT_SYMBOL_GPL(ktime_get_boottime);
/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	struct timekeeper *tk = &timekeeper;

	*ts = timespec_add(*ts, tk->total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
unsigned long get_seconds(void)
{
	struct timekeeper *tk = &timekeeper;

	return tk->xtime_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec __current_kernel_time(void)
{
	struct timekeeper *tk = &timekeeper;

	return tk_xtime(tk);
}
struct timespec current_kernel_time(void)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&tk->lock);
		now = tk_xtime(tk);
	} while (read_seqretry(&tk->lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);
struct timespec get_monotonic_coarse(void)
{
	struct timekeeper *tk = &timekeeper;
	struct timespec now, mono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&tk->lock);
		now = tk_xtime(tk);
		mono = tk->wall_to_monotonic;
	} while (read_seqretry(&tk->lock, seq));

	set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
				now.tv_nsec + mono.tv_nsec);
	return now;
}
/*
 * Must hold jiffies_lock
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_wall_time();
	calc_global_load(ticks);
}
/**
 * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic,
 *    and sleep offsets.
 * @xtim:	pointer to timespec to be set with xtime
 * @wtom:	pointer to timespec to be set with wall_to_monotonic
 * @sleep:	pointer to timespec to be set with time in suspend
 */
void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
				struct timespec *wtom, struct timespec *sleep)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;

	do {
		seq = read_seqbegin(&tk->lock);
		*xtim = tk_xtime(tk);
		*wtom = tk->wall_to_monotonic;
		*sleep = tk->total_sleep_time;
	} while (read_seqretry(&tk->lock, seq));
}
#ifdef CONFIG_HIGH_RES_TIMERS
/**
 * ktime_get_update_offsets - hrtimer helper
 * @offs_real:	pointer to storage for monotonic -> realtime offset
 * @offs_boot:	pointer to storage for monotonic -> boottime offset
 *
 * Returns current monotonic time and updates the offsets
 * Called from hrtimer_interrupt() or retrigger_next_event()
 */
ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot)
{
	struct timekeeper *tk = &timekeeper;
	ktime_t now;
	unsigned int seq;
	u64 secs, nsecs;

	do {
		seq = read_seqbegin(&tk->lock);
		secs = tk->xtime_sec;
		nsecs = timekeeping_get_ns(tk);
		*offs_real = tk->offs_real;
		*offs_boot = tk->offs_boot;
	} while (read_seqretry(&tk->lock, seq));

	now = ktime_add_ns(ktime_set(secs, 0), nsecs);
	now = ktime_sub(now, *offs_real);
	return now;
}
#endif
/**
 * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format
 */
ktime_t ktime_get_monotonic_offset(void)
{
	struct timekeeper *tk = &timekeeper;
	unsigned long seq;
	struct timespec wtom;

	do {
		seq = read_seqbegin(&tk->lock);
		wtom = tk->wall_to_monotonic;
	} while (read_seqretry(&tk->lock, seq));

	return timespec_to_ktime(wtom);
}
EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
/**
 * xtime_update() - advances the timekeeping infrastructure
 * @ticks:	number of ticks that have elapsed since the last call.
 *
 * Must be called with interrupts disabled.
 */
void xtime_update(unsigned long ticks)
{
	write_seqlock(&jiffies_lock);
	do_timer(ticks);
	write_sequnlock(&jiffies_lock);
}