2 * linux/kernel/posix-timers.c
5 * 2002-10-15 Posix Clocks & timers
6 * by George Anzinger george@mvista.com
8 * Copyright (C) 2002 2003 by MontaVista Software.
10 * 2004-06-01 Fix CLOCK_REALTIME clock/timer TIMER_ABSTIME bug.
11 * Copyright (C) 2004 Boris Hu
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or (at
16 * your option) any later version.
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 * General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 * MontaVista Software | 1237 East Arques Avenue | Sunnyvale | CA 94085 | USA
30 /* These are all the functions necessary to implement
31 * POSIX clocks & timers
34 #include <linux/interrupt.h>
35 #include <linux/slab.h>
36 #include <linux/time.h>
37 #include <linux/mutex.h>
38 #include <linux/sched/task.h>
40 #include <linux/uaccess.h>
41 #include <linux/list.h>
42 #include <linux/init.h>
43 #include <linux/compiler.h>
44 #include <linux/hash.h>
45 #include <linux/posix-clock.h>
46 #include <linux/posix-timers.h>
47 #include <linux/syscalls.h>
48 #include <linux/wait.h>
49 #include <linux/workqueue.h>
50 #include <linux/export.h>
51 #include <linux/hashtable.h>
53 #include "timekeeping.h"
54 #include "posix-timers.h"
57 * Management arrays for POSIX timers. Timers are now kept in a static hash table
58 * with 512 entries.
59 * Timer ids are allocated by a local routine, which selects the proper hash head by
60 * a key constructed from the current->signal address and a per-signal-struct counter.
61 * This keeps timer ids unique per process, but they can intersect between
62 * processes.
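 *
 * Illustrative userspace sketch of the per-process id behaviour described
 * above (an editor's example, not part of this file; link with -lrt on
 * older glibc):
 *
 *	#include <signal.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		timer_t t1, t2;
 *		struct sigevent sev = { .sigev_notify = SIGEV_NONE };
 *
 *		timer_create(CLOCK_MONOTONIC, &sev, &t1);
 *		timer_create(CLOCK_MONOTONIC, &sev, &t2);
 *		return 0;
 *	}
 *
 * t1 and t2 are distinct within this process, while an unrelated process
 * may legitimately observe the same numeric ids - exactly the intersection
 * the comment above allows.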
66 * Let's keep our timers in a slab cache :-)
68 static struct kmem_cache *posix_timers_cache;
70 static DEFINE_HASHTABLE(posix_timers_hashtable, 9);
71 static DEFINE_SPINLOCK(hash_lock);
73 static const struct k_clock * const posix_clocks[];
74 static const struct k_clock *clockid_to_kclock(const clockid_t id);
77 * We assume that the new SIGEV_THREAD_ID shares no bits with the other
78 * SIGEV values. Here we raise a compile-time error if this assumption fails.
80 #if SIGEV_THREAD_ID != (SIGEV_THREAD_ID & \
81 ~(SIGEV_SIGNAL | SIGEV_NONE | SIGEV_THREAD))
82 #error "SIGEV_THREAD_ID must not share bit with other SIGEV values!"
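/*
 * SIGEV_THREAD_ID is a Linux extension that is OR'ed with SIGEV_SIGNAL to
 * direct the timer signal at one specific thread. Illustrative userspace
 * sketch (editor's example; the sigev_notify_thread_id member name and a
 * gettid() wrapper are libc dependent, so treat both as assumptions):
 *
 *	struct sigevent sev;
 *	timer_t tid;
 *
 *	memset(&sev, 0, sizeof(sev));
 *	sev.sigev_notify = SIGEV_THREAD_ID | SIGEV_SIGNAL;
 *	sev.sigev_signo = SIGRTMIN;
 *	sev.sigev_notify_thread_id = gettid();
 *	timer_create(CLOCK_MONOTONIC, &sev, &tid);
 *
 * good_sigevent() below enforces exactly this shape: apart from the
 * SIGEV_THREAD_ID bit the notify mode must be SIGEV_SIGNAL, and the target
 * must be a thread in the caller's thread group.
 */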
86 * parisc wants ENOTSUP instead of EOPNOTSUPP
88 #ifndef ENOTSUP
89 # define ENANOSLEEP_NOTSUP EOPNOTSUPP
90 #else
91 # define ENANOSLEEP_NOTSUP ENOTSUP
92 #endif
95 * The timer ID is turned into a timer address by posix_timer_by_id().
96 * Verifying a valid ID consists of:
98 * a) checking that posix_timer_by_id() returns something other than NULL.
99 * b) checking that the timer id matches the one in the timer itself.
100 * c) that the timer owner is in the caller's thread group.
104 * CLOCKs: The POSIX standard calls for a couple of clocks and allows us
105 * to implement others. This structure defines the various
108 * RESOLUTION: Clock resolution is used to round up timer and interval
109 * times, NOT to report clock times, which are reported with as
110 * much resolution as the system can muster. In some cases this
111 * resolution may depend on the underlying clock hardware and
112 * may not be quantifiable until run time, and only then can the
113 * necessary code be written. The standard says we should say
114 * something about this issue in the documentation...
116 * FUNCTIONS: The CLOCKs structure defines possible functions to
117 * handle various clock functions.
119 * The standard POSIX timer management code assumes the
120 * following: 1.) The k_itimer struct (sched.h) is used for
121 * the timer. 2.) The list, it_lock, it_clock, it_id and
122 * it_pid fields are not modified by timer code.
124 * Permissions: It is assumed that the clock_settime() function defined
125 * for each clock will take care of permission checks. Some
126 * clocks may be settable by any user (i.e. local process
127 * clocks), others not. Currently the only settable clock we
128 * have is CLOCK_REALTIME and its high-res counterpart, both of
129 * which we beg off on and pass to do_sys_settimeofday().
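 *
 * Editor's illustration of that permission split (userspace sketch,
 * assuming an unprivileged caller without CAP_SYS_TIME):
 *
 *	struct timespec ts;
 *
 *	clock_gettime(CLOCK_REALTIME, &ts);
 *	ts.tv_sec += 1;
 *	if (clock_settime(CLOCK_REALTIME, &ts) < 0)
 *		perror("clock_settime");
 *
 * Reading the clock needs no privilege; setting CLOCK_REALTIME is expected
 * to fail with EPERM here because do_sys_settimeofday(), which this file
 * delegates to, performs the capability check.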
131 static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags);
133 #define lock_timer(tid, flags) \
134 ({ struct k_itimer *__timr; \
135 __cond_lock(&__timr->it_lock, __timr = __lock_timer(tid, flags)); \
136 __timr; \
137 })
139 static int hash(struct signal_struct *sig, unsigned int nr)
141 return hash_32(hash32_ptr(sig) ^ nr, HASH_BITS(posix_timers_hashtable));
144 static struct k_itimer *__posix_timers_find(struct hlist_head *head,
145 struct signal_struct *sig,
148 struct k_itimer *timer;
150 hlist_for_each_entry_rcu(timer, head, t_hash) {
151 if ((timer->it_signal == sig) && (timer->it_id == id))
157 static struct k_itimer *posix_timer_by_id(timer_t id)
159 struct signal_struct *sig = current->signal;
160 struct hlist_head *head = &posix_timers_hashtable[hash(sig, id)];
162 return __posix_timers_find(head, sig, id);
165 static int posix_timer_add(struct k_itimer *timer)
167 struct signal_struct *sig = current->signal;
168 int first_free_id = sig->posix_timer_id;
169 struct hlist_head *head;
170 int ret = -ENOENT;
172 do {
173 spin_lock(&hash_lock);
174 head = &posix_timers_hashtable[hash(sig, sig->posix_timer_id)];
175 if (!__posix_timers_find(head, sig, sig->posix_timer_id)) {
176 hlist_add_head_rcu(&timer->t_hash, head);
177 ret = sig->posix_timer_id;
179 if (++sig->posix_timer_id < 0)
180 sig->posix_timer_id = 0;
181 if ((sig->posix_timer_id == first_free_id) && (ret == -ENOENT))
182 /* Loop over all possible ids completed */
183 ret = -EAGAIN;
184 spin_unlock(&hash_lock);
185 } while (ret == -ENOENT);
189 static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
191 spin_unlock_irqrestore(&timr->it_lock, flags);
194 /* Get clock_realtime */
195 static int posix_clock_realtime_get(clockid_t which_clock, struct timespec64 *tp)
197 ktime_get_real_ts64(tp);
201 /* Set clock_realtime */
202 static int posix_clock_realtime_set(const clockid_t which_clock,
203 const struct timespec64 *tp)
205 return do_sys_settimeofday64(tp, NULL);
208 static int posix_clock_realtime_adj(const clockid_t which_clock,
211 return do_adjtimex(t);
215 * Get monotonic time for posix timers
217 static int posix_ktime_get_ts(clockid_t which_clock, struct timespec64 *tp)
224 * Get monotonic-raw time for posix timers
226 static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec64 *tp)
228 getrawmonotonic64(tp);
233 static int posix_get_realtime_coarse(clockid_t which_clock, struct timespec64 *tp)
235 *tp = current_kernel_time64();
239 static int posix_get_monotonic_coarse(clockid_t which_clock,
240 struct timespec64 *tp)
242 *tp = get_monotonic_coarse64();
246 static int posix_get_coarse_res(const clockid_t which_clock, struct timespec64 *tp)
248 *tp = ktime_to_timespec64(KTIME_LOW_RES);
252 static int posix_get_boottime(const clockid_t which_clock, struct timespec64 *tp)
254 get_monotonic_boottime64(tp);
258 static int posix_get_tai(clockid_t which_clock, struct timespec64 *tp)
260 timekeeping_clocktai64(tp);
264 static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec64 *tp)
266 tp->tv_sec = 0;
267 tp->tv_nsec = hrtimer_resolution;
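/*
 * What userspace sees from the two resolution callbacks in this file
 * (editor's sketch; the exact numbers depend on CONFIG_HIGH_RES_TIMERS and
 * CONFIG_HZ, so the values quoted assume HZ=250 with high resolution
 * timers enabled):
 *
 *	struct timespec res;
 *
 *	clock_getres(CLOCK_MONOTONIC, &res);
 *	clock_getres(CLOCK_MONOTONIC_COARSE, &res);
 *
 * The first typically reports 1 ns (hrtimer_resolution), the second the
 * tick period, i.e. 4 ms for HZ=250 (KTIME_LOW_RES).
 */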
272 * Initialize everything, well, just everything in Posix clocks/timers ;)
274 static __init int init_posix_timers(void)
276 posix_timers_cache = kmem_cache_create("posix_timers_cache",
277 sizeof (struct k_itimer), 0, SLAB_PANIC,
278 NULL);
279 return 0;
281 __initcall(init_posix_timers);
283 static void common_hrtimer_rearm(struct k_itimer *timr)
285 struct hrtimer *timer = &timr->it.real.timer;
287 if (!timr->it_interval)
290 timr->it_overrun += (unsigned int) hrtimer_forward(timer,
291 timer->base->get_time(),
292 timr->it_interval);
293 hrtimer_restart(timer);
297 * This function is exported for use by the signal delivery code. It is
298 * called just prior to the info block being released and passes that
299 * block to us. Its function is to update the overrun entry AND to
300 * restart the timer. It should only be called if the timer is to be
301 * restarted (i.e. we have flagged this in the sys_private entry of the
302 * info block).
304 * To protect against the timer going away while the interrupt is queued,
305 * we require that the it_requeue_pending flag be set.
307 void posixtimer_rearm(struct siginfo *info)
309 struct k_itimer *timr;
312 timr = lock_timer(info->si_tid, &flags);
316 if (timr->it_requeue_pending == info->si_sys_private) {
317 timr->kclock->timer_rearm(timr);
320 timr->it_overrun_last = timr->it_overrun;
321 timr->it_overrun = -1;
322 ++timr->it_requeue_pending;
324 info->si_overrun += timr->it_overrun_last;
327 unlock_timer(timr, flags);
330 int posix_timer_event(struct k_itimer *timr, int si_private)
332 struct task_struct *task;
333 int shared, ret = -1;
335 * FIXME: if ->sigq is queued we can race with
336 * dequeue_signal()->posixtimer_rearm().
338 * If dequeue_signal() sees the "right" value of
339 * si_sys_private it calls posixtimer_rearm().
340 * We re-queue ->sigq and drop ->it_lock().
341 * posixtimer_rearm() locks the timer
342 * and re-schedules it while ->sigq is pending.
343 * Not really bad, but not what we want.
345 timr->sigq->info.si_sys_private = si_private;
348 task = pid_task(timr->it_pid, PIDTYPE_PID);
350 shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
351 ret = send_sigqueue(timr->sigq, task, shared);
354 /* If we failed to send the signal the timer stops. */
359 * This function gets called when a POSIX.1b interval timer expires. It
360 * is used as a callback from the kernel internal timer. The
361 * run_timer_list code ALWAYS calls with interrupts on.
363 * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers.
365 static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
367 struct k_itimer *timr;
368 unsigned long flags;
369 int si_private = 0;
370 enum hrtimer_restart ret = HRTIMER_NORESTART;
372 timr = container_of(timer, struct k_itimer, it.real.timer);
373 spin_lock_irqsave(&timr->it_lock, flags);
376 if (timr->it_interval != 0)
377 si_private = ++timr->it_requeue_pending;
379 if (posix_timer_event(timr, si_private)) {
381 * The signal was not sent because of sig_ignored();
382 * we will not get a callback to restart it AND
383 * it should be restarted.
385 if (timr->it_interval != 0) {
386 ktime_t now = hrtimer_cb_get_time(timer);
389 * FIXME: What we really want is to stop this
390 * timer completely and restart it in case the
391 * SIG_IGN is removed. This is a non-trivial
392 * change which involves sighand locking
393 * (sigh !), which we don't want to do late in
394 * the release cycle.
396 * For now we just let timers with an interval
397 * less than a jiffie expire every jiffie to
398 * avoid softirq starvation in case of SIG_IGN
399 * and a very small interval, which would put
400 * the timer right back on the softirq pending
401 * list. By moving now ahead of time we trick
402 * hrtimer_forward() to expire the timer
403 * later, while we still maintain the overrun
404 * accuracy, but have some inconsistency in
405 * the timer_gettime() case. This is at least
406 * better than a starved softirq. A more
407 * complex fix which also solves another related
408 * inconsistency is already in the pipeline.
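 *
 * Worked example of that clamp (editor's note; HZ is configuration
 * dependent, 250 is only an assumption): kj = NSEC_PER_SEC / HZ = 4 ms.
 * A SIG_IGN'ed timer with a 1 ms interval would otherwise be re-armed on
 * every expiry at ~1000 Hz; adding kj to "now" makes hrtimer_forward()
 * place the next expiry at least one jiffie ahead, so it fires at roughly
 * 250 Hz while it_overrun still accounts for the skipped periods.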
410 #ifdef CONFIG_HIGH_RES_TIMERS
412 ktime_t kj = NSEC_PER_SEC / HZ;
414 if (timr->it_interval < kj)
415 now = ktime_add(now, kj);
418 timr->it_overrun += (unsigned int)
419 hrtimer_forward(timer, now,
420 timr->it_interval);
421 ret = HRTIMER_RESTART;
422 ++timr->it_requeue_pending;
427 unlock_timer(timr, flags);
431 static struct pid *good_sigevent(sigevent_t * event)
433 struct task_struct *rtn = current->group_leader;
435 if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
436 (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
437 !same_thread_group(rtn, current) ||
438 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
441 if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
442 ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
445 return task_pid(rtn);
448 static struct k_itimer * alloc_posix_timer(void)
450 struct k_itimer *tmr;
451 tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
454 if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
455 kmem_cache_free(posix_timers_cache, tmr);
458 memset(&tmr->sigq->info, 0, sizeof(siginfo_t));
462 static void k_itimer_rcu_free(struct rcu_head *head)
464 struct k_itimer *tmr = container_of(head, struct k_itimer, it.rcu);
466 kmem_cache_free(posix_timers_cache, tmr);
469 #define IT_ID_SET 1
470 #define IT_ID_NOT_SET 0
471 static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
475 spin_lock_irqsave(&hash_lock, flags);
476 hlist_del_rcu(&tmr->t_hash);
477 spin_unlock_irqrestore(&hash_lock, flags);
479 put_pid(tmr->it_pid);
480 sigqueue_free(tmr->sigq);
481 call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
484 static int common_timer_create(struct k_itimer *new_timer)
486 hrtimer_init(&new_timer->it.real.timer, new_timer->it_clock, 0);
490 /* Create a POSIX.1b interval timer. */
492 SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
493 struct sigevent __user *, timer_event_spec,
494 timer_t __user *, created_timer_id)
496 const struct k_clock *kc = clockid_to_kclock(which_clock);
497 struct k_itimer *new_timer;
498 int error, new_timer_id;
500 int it_id_set = IT_ID_NOT_SET;
504 if (!kc->timer_create)
507 new_timer = alloc_posix_timer();
508 if (unlikely(!new_timer))
511 spin_lock_init(&new_timer->it_lock);
512 new_timer_id = posix_timer_add(new_timer);
513 if (new_timer_id < 0) {
514 error = new_timer_id;
518 it_id_set = IT_ID_SET;
519 new_timer->it_id = (timer_t) new_timer_id;
520 new_timer->it_clock = which_clock;
521 new_timer->kclock = kc;
522 new_timer->it_overrun = -1;
524 if (timer_event_spec) {
525 if (copy_from_user(&event, timer_event_spec, sizeof (event))) {
530 new_timer->it_pid = get_pid(good_sigevent(&event));
532 if (!new_timer->it_pid) {
537 memset(&event.sigev_value, 0, sizeof(event.sigev_value));
538 event.sigev_notify = SIGEV_SIGNAL;
539 event.sigev_signo = SIGALRM;
540 event.sigev_value.sival_int = new_timer->it_id;
541 new_timer->it_pid = get_pid(task_tgid(current));
544 new_timer->it_sigev_notify = event.sigev_notify;
545 new_timer->sigq->info.si_signo = event.sigev_signo;
546 new_timer->sigq->info.si_value = event.sigev_value;
547 new_timer->sigq->info.si_tid = new_timer->it_id;
548 new_timer->sigq->info.si_code = SI_TIMER;
550 if (copy_to_user(created_timer_id,
551 &new_timer_id, sizeof (new_timer_id))) {
556 error = kc->timer_create(new_timer);
560 spin_lock_irq(&current->sighand->siglock);
561 new_timer->it_signal = current->signal;
562 list_add(&new_timer->list, &current->signal->posix_timers);
563 spin_unlock_irq(&current->sighand->siglock);
567 * In the case of the timer belonging to another task, after
568 * the task is unlocked, the timer is owned by the other task
569 * and may cease to exist at any time. Don't use or modify
570 * new_timer after the unlock call.
573 release_posix_timer(new_timer, it_id_set);
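/*
 * End-to-end userspace sketch of the syscall above (editor's example; error
 * handling omitted, -lrt may be needed on older glibc). Passing a NULL
 * sigevent instead falls back to the SIGALRM default set up in the code
 * above:
 *
 *	timer_t tid;
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGRTMIN,
 *	};
 *	struct itimerspec its = {
 *		.it_value    = { .tv_sec = 0, .tv_nsec = 100000000 },
 *		.it_interval = { .tv_sec = 0, .tv_nsec = 100000000 },
 *	};
 *
 *	timer_create(CLOCK_MONOTONIC, &sev, &tid);
 *	timer_settime(tid, 0, &its, NULL);
 */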
578 * Locking issues: We need to protect the result of the id lookup until
579 * we get the timer locked down so it is not deleted under us. The
580 * removal is done under the hash spinlock so we use that here to bridge
581 * the find to the timer lock. To avoid a deadlock, the timer id MUST
582 * be released without holding the timer lock.
584 static struct k_itimer *__lock_timer(timer_t timer_id, unsigned long *flags)
586 struct k_itimer *timr;
589 * timer_t could be any type >= int and we want to make sure any
590 * @timer_id outside positive int range fails lookup.
592 if ((unsigned long long)timer_id > INT_MAX)
596 timr = posix_timer_by_id(timer_id);
598 spin_lock_irqsave(&timr->it_lock, *flags);
599 if (timr->it_signal == current->signal) {
603 spin_unlock_irqrestore(&timr->it_lock, *flags);
610 static ktime_t common_hrtimer_remaining(struct k_itimer *timr, ktime_t now)
612 struct hrtimer *timer = &timr->it.real.timer;
614 return __hrtimer_expires_remaining_adjusted(timer, now);
617 static int common_hrtimer_forward(struct k_itimer *timr, ktime_t now)
619 struct hrtimer *timer = &timr->it.real.timer;
621 return (int)hrtimer_forward(timer, now, timr->it_interval);
625 * Get the time remaining on a POSIX.1b interval timer. This function
626 * is ALWAYS called with spin_lock_irq on the timer, thus it must not
627 * mess with irq.
629 * We have a couple of messes to clean up here. First there is the case
630 * of a timer that has a requeue pending. These timers should appear to
631 * be in the timer list with an expiry as if we were to requeue them
632 * here.
634 * The second issue is the SIGEV_NONE timer which may be active but is
635 * not really ever put in the timer list (to save system resources).
636 * This timer may be expired, and if so, we will do it here. Otherwise
637 * it is the same as a requeue pending timer WRT what we should
638 * report.
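 *
 * From userspace the net effect is (editor's sketch): for an armed periodic
 * timer, timer_gettime() reports the time left to the next expiry in
 * it_value and the period in it_interval; an expired one-shot SIGEV_NONE
 * timer reads back as all zero:
 *
 *	struct itimerspec cur;
 *
 *	timer_gettime(tid, &cur);
 *
 * cur.it_value == 0/0 then means "disarmed or already expired", which is
 * why the armed-but-expired case below reports 1 ns instead of 0.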
640 void common_timer_get(struct k_itimer *timr, struct itimerspec64 *cur_setting)
642 const struct k_clock *kc = timr->kclock;
643 ktime_t now, remaining, iv;
644 struct timespec64 ts64;
645 bool sig_none;
647 sig_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
648 iv = timr->it_interval;
650 /* interval timer ? */
651 if (iv) {
652 cur_setting->it_interval = ktime_to_timespec64(iv);
653 } else if (!timr->it_active) {
655 * SIGEV_NONE oneshot timers are never queued. Check them
656 * here.
663 * The timespec64 based conversion is suboptimal, but it's not
664 * worth implementing yet another callback.
666 kc->clock_get(timr->it_clock, &ts64);
667 now = timespec64_to_ktime(ts64);
670 * When a requeue is pending or this is a SIGEV_NONE timer move the
671 * expiry time forward by intervals, so expiry is > now.
673 if (iv && (timr->it_requeue_pending & REQUEUE_PENDING || sig_none))
674 timr->it_overrun += kc->timer_forward(timr, now);
676 remaining = kc->timer_remaining(timr, now);
677 /* Return 0 only when the timer is expired and not pending */
678 if (remaining <= 0) {
680 * A single shot SIGEV_NONE timer must return 0, when
681 * it is expired!
683 if (!sig_none)
684 cur_setting->it_value.tv_nsec = 1;
686 cur_setting->it_value = ktime_to_timespec64(remaining);
690 /* Get the time remaining on a POSIX.1b interval timer. */
691 SYSCALL_DEFINE2(timer_gettime, timer_t, timer_id,
692 struct itimerspec __user *, setting)
694 struct itimerspec64 cur_setting64;
695 struct itimerspec cur_setting;
696 struct k_itimer *timr;
697 const struct k_clock *kc;
701 timr = lock_timer(timer_id, &flags);
705 memset(&cur_setting64, 0, sizeof(cur_setting64));
707 if (WARN_ON_ONCE(!kc || !kc->timer_get))
710 kc->timer_get(timr, &cur_setting64);
712 unlock_timer(timr, flags);
714 cur_setting = itimerspec64_to_itimerspec(&cur_setting64);
715 if (!ret && copy_to_user(setting, &cur_setting, sizeof (cur_setting)))
722 * Get the number of overruns of a POSIX.1b interval timer. This is to
723 * be the overrun of the timer last delivered. At the same time we are
724 * accumulating overruns on the next timer. The overrun is frozen when
725 * the signal is delivered, either at the notify time (if the info block
726 * is not queued) or at the actual delivery time (as we are informed by
727 * the callback to posixtimer_rearm()). So all we need to do is
728 * to pick up the frozen overrun.
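 *
 * Userspace sketch of consuming that frozen overrun (editor's example; the
 * handler below is assumed to be installed with SA_SIGINFO for the signal
 * chosen at timer_create() time):
 *
 *	static timer_t tid;
 *
 *	static void handler(int sig, siginfo_t *si, void *uc)
 *	{
 *		int missed = timer_getoverrun(tid);
 *	}
 *
 * timer_getoverrun() is async-signal-safe and returns how many additional
 * expirations occurred between queueing and delivery of this signal.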
730 SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id)
732 struct k_itimer *timr;
736 timr = lock_timer(timer_id, &flags);
740 overrun = timr->it_overrun_last;
741 unlock_timer(timr, flags);
746 static void common_hrtimer_arm(struct k_itimer *timr, ktime_t expires,
747 bool absolute, bool sigev_none)
749 struct hrtimer *timer = &timr->it.real.timer;
750 enum hrtimer_mode mode;
752 mode = absolute ? HRTIMER_MODE_ABS : HRTIMER_MODE_REL;
753 hrtimer_init(&timr->it.real.timer, timr->it_clock, mode);
754 timr->it.real.timer.function = posix_timer_fn;
756 if (!absolute)
757 expires = ktime_add_safe(expires, timer->base->get_time());
758 hrtimer_set_expires(timer, expires);
760 if (!sigev_none)
761 hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
764 static int common_hrtimer_try_to_cancel(struct k_itimer *timr)
766 return hrtimer_try_to_cancel(&timr->it.real.timer);
769 /* Set a POSIX.1b interval timer. */
770 int common_timer_set(struct k_itimer *timr, int flags,
771 struct itimerspec64 *new_setting,
772 struct itimerspec64 *old_setting)
774 const struct k_clock *kc = timr->kclock;
779 common_timer_get(timr, old_setting);
781 /* Prevent rearming by clearing the interval */
782 timr->it_interval = 0;
784 * Careful here. On SMP systems the timer expiry function could be
785 * active and spinning on timr->it_lock.
787 if (kc->timer_try_to_cancel(timr) < 0)
791 timr->it_requeue_pending = (timr->it_requeue_pending + 2) &
793 timr->it_overrun_last = 0;
795 /* Switch off the timer when it_value is zero */
796 if (!new_setting->it_value.tv_sec && !new_setting->it_value.tv_nsec)
799 timr->it_interval = timespec64_to_ktime(new_setting->it_interval);
800 expires = timespec64_to_ktime(new_setting->it_value);
801 sigev_none = (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE;
803 kc->timer_arm(timr, expires, flags & TIMER_ABSTIME, sigev_none);
804 timr->it_active = !sigev_none;
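/*
 * Userspace sketch of the TIMER_ABSTIME path handled above (editor's
 * example; assumes tid was created against CLOCK_REALTIME):
 *
 *	struct itimerspec its = { 0 };
 *
 *	clock_gettime(CLOCK_REALTIME, &its.it_value);
 *	its.it_value.tv_sec += 5;
 *	timer_settime(tid, TIMER_ABSTIME, &its, NULL);
 *
 * With TIMER_ABSTIME the expiry is taken as an absolute time on the timer's
 * clock; without it, common_hrtimer_arm() adds the current clock reading to
 * turn the relative value into an absolute one.
 */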
808 /* Set a POSIX.1b interval timer */
809 SYSCALL_DEFINE4(timer_settime, timer_t, timer_id, int, flags,
810 const struct itimerspec __user *, new_setting,
811 struct itimerspec __user *, old_setting)
813 struct itimerspec64 new_spec64, old_spec64;
814 struct itimerspec64 *rtn = old_setting ? &old_spec64 : NULL;
815 struct itimerspec new_spec, old_spec;
816 struct k_itimer *timr;
818 const struct k_clock *kc;
824 if (copy_from_user(&new_spec, new_setting, sizeof (new_spec)))
826 new_spec64 = itimerspec_to_itimerspec64(&new_spec);
828 if (!timespec64_valid(&new_spec64.it_interval) ||
829 !timespec64_valid(&new_spec64.it_value))
832 memset(rtn, 0, sizeof(*rtn));
834 timr = lock_timer(timer_id, &flag);
839 if (WARN_ON_ONCE(!kc || !kc->timer_set))
842 error = kc->timer_set(timr, flags, &new_spec64, rtn);
844 unlock_timer(timr, flag);
845 if (error == TIMER_RETRY) {
846 rtn = NULL; // We already got the old time...
850 old_spec = itimerspec64_to_itimerspec(&old_spec64);
851 if (old_setting && !error &&
852 copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
858 int common_timer_del(struct k_itimer *timer)
860 const struct k_clock *kc = timer->kclock;
862 timer->it_interval = 0;
863 if (kc->timer_try_to_cancel(timer) < 0)
865 timer->it_active = 0;
869 static inline int timer_delete_hook(struct k_itimer *timer)
871 const struct k_clock *kc = timer->kclock;
873 if (WARN_ON_ONCE(!kc || !kc->timer_del))
875 return kc->timer_del(timer);
878 /* Delete a POSIX.1b interval timer. */
879 SYSCALL_DEFINE1(timer_delete, timer_t, timer_id)
881 struct k_itimer *timer;
885 timer = lock_timer(timer_id, &flags);
889 if (timer_delete_hook(timer) == TIMER_RETRY) {
890 unlock_timer(timer, flags);
894 spin_lock(&current->sighand->siglock);
895 list_del(&timer->list);
896 spin_unlock(&current->sighand->siglock);
898 * This keeps any tasks waiting on the spin lock from thinking
899 * they got something (see the lock code above).
901 timer->it_signal = NULL;
903 unlock_timer(timer, flags);
904 release_posix_timer(timer, IT_ID_SET);
909 * Delete a timer owned by the process; used by exit_itimers().
911 static void itimer_delete(struct k_itimer *timer)
916 spin_lock_irqsave(&timer->it_lock, flags);
918 if (timer_delete_hook(timer) == TIMER_RETRY) {
919 unlock_timer(timer, flags);
922 list_del(&timer->list);
924 * This keeps any tasks waiting on the spin lock from thinking
925 * they got something (see the lock code above).
927 timer->it_signal = NULL;
929 unlock_timer(timer, flags);
930 release_posix_timer(timer, IT_ID_SET);
934 * This is called by do_exit or de_thread, only when there are no more
935 * references to the shared signal_struct.
937 void exit_itimers(struct signal_struct *sig)
939 struct k_itimer *tmr;
941 while (!list_empty(&sig->posix_timers)) {
942 tmr = list_entry(sig->posix_timers.next, struct k_itimer, list);
943 itimer_delete(tmr);
947 SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
948 const struct timespec __user *, tp)
950 const struct k_clock *kc = clockid_to_kclock(which_clock);
951 struct timespec64 new_tp64;
952 struct timespec new_tp;
954 if (!kc || !kc->clock_set)
957 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
959 new_tp64 = timespec_to_timespec64(new_tp);
961 return kc->clock_set(which_clock, &new_tp64);
964 SYSCALL_DEFINE2(clock_gettime, const clockid_t, which_clock,
965 struct timespec __user *,tp)
967 const struct k_clock *kc = clockid_to_kclock(which_clock);
968 struct timespec64 kernel_tp64;
969 struct timespec kernel_tp;
975 error = kc->clock_get(which_clock, &kernel_tp64);
976 kernel_tp = timespec64_to_timespec(kernel_tp64);
978 if (!error && copy_to_user(tp, &kernel_tp, sizeof (kernel_tp)))
984 SYSCALL_DEFINE2(clock_adjtime, const clockid_t, which_clock,
985 struct timex __user *, utx)
987 const struct k_clock *kc = clockid_to_kclock(which_clock);
996 if (copy_from_user(&ktx, utx, sizeof(ktx)))
999 err = kc->clock_adj(which_clock, &ktx);
1001 if (err >= 0 && copy_to_user(utx, &ktx, sizeof(ktx)))
1007 SYSCALL_DEFINE2(clock_getres, const clockid_t, which_clock,
1008 struct timespec __user *, tp)
1010 const struct k_clock *kc = clockid_to_kclock(which_clock);
1011 struct timespec64 rtn_tp64;
1012 struct timespec rtn_tp;
1018 error = kc->clock_getres(which_clock, &rtn_tp64);
1019 rtn_tp = timespec64_to_timespec(rtn_tp64);
1021 if (!error && tp && copy_to_user(tp, &rtn_tp, sizeof (rtn_tp)))
1028 * nanosleep for monotonic and realtime clocks
1030 static int common_nsleep(const clockid_t which_clock, int flags,
1031 struct timespec64 *tsave, struct timespec __user *rmtp)
1033 return hrtimer_nanosleep(tsave, rmtp, flags & TIMER_ABSTIME ?
1034 HRTIMER_MODE_ABS : HRTIMER_MODE_REL,
1035 which_clock);
1038 SYSCALL_DEFINE4(clock_nanosleep, const clockid_t, which_clock, int, flags,
1039 const struct timespec __user *, rqtp,
1040 struct timespec __user *, rmtp)
1042 const struct k_clock *kc = clockid_to_kclock(which_clock);
1043 struct timespec64 t64;
1049 return -ENANOSLEEP_NOTSUP;
1051 if (copy_from_user(&t, rqtp, sizeof (struct timespec)))
1054 t64 = timespec_to_timespec64(t);
1055 if (!timespec64_valid(&t64))
1058 return kc->nsleep(which_clock, flags, &t64, rmtp);
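/*
 * Userspace sketch of an absolute sleep through the path above (editor's
 * example). With TIMER_ABSTIME the remaining-time argument is not updated,
 * so the same deadline can simply be retried after a signal:
 *
 *	struct timespec deadline;
 *
 *	clock_gettime(CLOCK_MONOTONIC, &deadline);
 *	deadline.tv_sec += 2;
 *	while (clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME,
 *			       &deadline, NULL) == EINTR)
 *		;
 */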
1062 * This will restart clock_nanosleep. This is required only by
1063 * compat_clock_nanosleep_restart for now.
1065 long clock_nanosleep_restart(struct restart_block *restart_block)
1067 clockid_t which_clock = restart_block->nanosleep.clockid;
1068 const struct k_clock *kc = clockid_to_kclock(which_clock);
1070 if (WARN_ON_ONCE(!kc || !kc->nsleep_restart))
1073 return kc->nsleep_restart(restart_block);
1076 static const struct k_clock clock_realtime = {
1077 .clock_getres = posix_get_hrtimer_res,
1078 .clock_get = posix_clock_realtime_get,
1079 .clock_set = posix_clock_realtime_set,
1080 .clock_adj = posix_clock_realtime_adj,
1081 .nsleep = common_nsleep,
1082 .nsleep_restart = hrtimer_nanosleep_restart,
1083 .timer_create = common_timer_create,
1084 .timer_set = common_timer_set,
1085 .timer_get = common_timer_get,
1086 .timer_del = common_timer_del,
1087 .timer_rearm = common_hrtimer_rearm,
1088 .timer_forward = common_hrtimer_forward,
1089 .timer_remaining = common_hrtimer_remaining,
1090 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1091 .timer_arm = common_hrtimer_arm,
1094 static const struct k_clock clock_monotonic = {
1095 .clock_getres = posix_get_hrtimer_res,
1096 .clock_get = posix_ktime_get_ts,
1097 .nsleep = common_nsleep,
1098 .nsleep_restart = hrtimer_nanosleep_restart,
1099 .timer_create = common_timer_create,
1100 .timer_set = common_timer_set,
1101 .timer_get = common_timer_get,
1102 .timer_del = common_timer_del,
1103 .timer_rearm = common_hrtimer_rearm,
1104 .timer_forward = common_hrtimer_forward,
1105 .timer_remaining = common_hrtimer_remaining,
1106 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1107 .timer_arm = common_hrtimer_arm,
1110 static const struct k_clock clock_monotonic_raw = {
1111 .clock_getres = posix_get_hrtimer_res,
1112 .clock_get = posix_get_monotonic_raw,
1115 static const struct k_clock clock_realtime_coarse = {
1116 .clock_getres = posix_get_coarse_res,
1117 .clock_get = posix_get_realtime_coarse,
1120 static const struct k_clock clock_monotonic_coarse = {
1121 .clock_getres = posix_get_coarse_res,
1122 .clock_get = posix_get_monotonic_coarse,
1125 static const struct k_clock clock_tai = {
1126 .clock_getres = posix_get_hrtimer_res,
1127 .clock_get = posix_get_tai,
1128 .nsleep = common_nsleep,
1129 .nsleep_restart = hrtimer_nanosleep_restart,
1130 .timer_create = common_timer_create,
1131 .timer_set = common_timer_set,
1132 .timer_get = common_timer_get,
1133 .timer_del = common_timer_del,
1134 .timer_rearm = common_hrtimer_rearm,
1135 .timer_forward = common_hrtimer_forward,
1136 .timer_remaining = common_hrtimer_remaining,
1137 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1138 .timer_arm = common_hrtimer_arm,
1141 static const struct k_clock clock_boottime = {
1142 .clock_getres = posix_get_hrtimer_res,
1143 .clock_get = posix_get_boottime,
1144 .nsleep = common_nsleep,
1145 .nsleep_restart = hrtimer_nanosleep_restart,
1146 .timer_create = common_timer_create,
1147 .timer_set = common_timer_set,
1148 .timer_get = common_timer_get,
1149 .timer_del = common_timer_del,
1150 .timer_rearm = common_hrtimer_rearm,
1151 .timer_forward = common_hrtimer_forward,
1152 .timer_remaining = common_hrtimer_remaining,
1153 .timer_try_to_cancel = common_hrtimer_try_to_cancel,
1154 .timer_arm = common_hrtimer_arm,
1157 static const struct k_clock * const posix_clocks[] = {
1158 [CLOCK_REALTIME] = &clock_realtime,
1159 [CLOCK_MONOTONIC] = &clock_monotonic,
1160 [CLOCK_PROCESS_CPUTIME_ID] = &clock_process,
1161 [CLOCK_THREAD_CPUTIME_ID] = &clock_thread,
1162 [CLOCK_MONOTONIC_RAW] = &clock_monotonic_raw,
1163 [CLOCK_REALTIME_COARSE] = &clock_realtime_coarse,
1164 [CLOCK_MONOTONIC_COARSE] = &clock_monotonic_coarse,
1165 [CLOCK_BOOTTIME] = &clock_boottime,
1166 [CLOCK_REALTIME_ALARM] = &alarm_clock,
1167 [CLOCK_BOOTTIME_ALARM] = &alarm_clock,
1168 [CLOCK_TAI] = &clock_tai,
1171 static const struct k_clock *clockid_to_kclock(const clockid_t id)
1173 if (id < 0)
1174 return (id & CLOCKFD_MASK) == CLOCKFD ?
1175 &clock_posix_dynamic : &clock_posix_cpu;
1177 if (id >= ARRAY_SIZE(posix_clocks) || !posix_clocks[id])
1178 return NULL;
1179 return posix_clocks[id];
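/*
 * Negative clock ids reaching clockid_to_kclock() are either per-process /
 * per-thread CPU clocks or dynamic posix clocks backed by a character
 * device. Userspace sketch for the dynamic case (editor's example; the
 * FD_TO_CLOCKID() and CLOCKFD definitions mirror the kernel's PTP test
 * tools and are assumptions here):
 *
 *	#define CLOCKFD			3
 *	#define FD_TO_CLOCKID(fd)	((~(clockid_t) (fd) << 3) | CLOCKFD)
 *
 *	int fd = open("/dev/ptp0", O_RDWR);
 *	struct timespec ts;
 *
 *	clock_gettime(FD_TO_CLOCKID(fd), &ts);
 *
 * Such ids are negative, so they take the clock_posix_dynamic branch above
 * and are resolved through the posix_clock file operations instead of the
 * static posix_clocks[] table.
 */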