/*
 *  linux/kernel/sys.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */
#include <linux/export.h>
#include <linux/utsname.h>
#include <linux/mman.h>
#include <linux/reboot.h>
#include <linux/prctl.h>
#include <linux/highuid.h>
#include <linux/kmod.h>
#include <linux/perf_event.h>
#include <linux/resource.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/key.h>
#include <linux/times.h>
#include <linux/posix-timers.h>
#include <linux/security.h>
#include <linux/dcookies.h>
#include <linux/suspend.h>
#include <linux/tty.h>
#include <linux/signal.h>
#include <linux/cn_proc.h>
#include <linux/getcpu.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/seccomp.h>
#include <linux/cpu.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/fs_struct.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/gfp.h>
#include <linux/syscore_ops.h>
#include <linux/version.h>
#include <linux/ctype.h>

#include <linux/compat.h>
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
#include <linux/binfmts.h>

#include <linux/sched.h>
#include <linux/sched/autogroup.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/rcupdate.h>
#include <linux/uidgid.h>
#include <linux/cred.h>

#include <linux/kmsg_dump.h>
/* Move somewhere else to avoid recompiling? */
#include <generated/utsrelease.h>

#include <linux/uaccess.h>
#include <asm/unistd.h>

#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_UNALIGN_CTL
# define GET_UNALIGN_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEMU_CTL
# define SET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEMU_CTL
# define GET_FPEMU_CTL(a, b)	(-EINVAL)
#endif
#ifndef SET_FPEXC_CTL
# define SET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_FPEXC_CTL
# define GET_FPEXC_CTL(a, b)	(-EINVAL)
#endif
#ifndef GET_ENDIAN
# define GET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef SET_ENDIAN
# define SET_ENDIAN(a, b)	(-EINVAL)
#endif
#ifndef GET_TSC_CTL
# define GET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef SET_TSC_CTL
# define SET_TSC_CTL(a)		(-EINVAL)
#endif
#ifndef MPX_ENABLE_MANAGEMENT
# define MPX_ENABLE_MANAGEMENT()	(-EINVAL)
#endif
#ifndef MPX_DISABLE_MANAGEMENT
# define MPX_DISABLE_MANAGEMENT()	(-EINVAL)
#endif
#ifndef GET_FP_MODE
# define GET_FP_MODE(a)		(-EINVAL)
#endif
#ifndef SET_FP_MODE
# define SET_FP_MODE(a, b)	(-EINVAL)
#endif

/*
 * this is where the system-wide overflow UID and GID are defined, for
 * architectures that now have 32-bit UID/GID but didn't in the past
 */

int overflowuid = DEFAULT_OVERFLOWUID;
int overflowgid = DEFAULT_OVERFLOWGID;

EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);

/*
 * the same as above, but for filesystems which can only store a 16-bit
 * UID and GID. as such, this is needed on all architectures
 */

int fs_overflowuid = DEFAULT_FS_OVERFLOWUID;
int fs_overflowgid = DEFAULT_FS_OVERFLOWGID;

EXPORT_SYMBOL(fs_overflowuid);
EXPORT_SYMBOL(fs_overflowgid);

/*
 * Returns true if current's euid is same as p's uid or euid,
 * or has CAP_SYS_NICE to p's user_ns.
 *
 * Called with rcu_read_lock, creds are safe
 */
static bool set_one_prio_perm(struct task_struct *p)
{
	const struct cred *cred = current_cred(), *pcred = __task_cred(p);

	if (uid_eq(pcred->uid,  cred->euid) ||
	    uid_eq(pcred->euid, cred->euid))
		return true;
	if (ns_capable(pcred->user_ns, CAP_SYS_NICE))
		return true;
	return false;
}

/*
 * set the priority of a task
 * - the caller must hold the RCU read lock
 */
static int set_one_prio(struct task_struct *p, int niceval, int error)
{
	int no_nice;

	if (!set_one_prio_perm(p)) {
		error = -EPERM;
		goto out;
	}
	if (niceval < task_nice(p) && !can_nice(p, niceval)) {
		error = -EACCES;
		goto out;
	}
	no_nice = security_task_setnice(p, niceval);
	if (no_nice) {
		error = no_nice;
		goto out;
	}
	if (error == -ESRCH)
		error = 0;
	set_user_nice(p, niceval);
out:
	return error;
}

SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	int error = -EINVAL;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		goto out;

	/* normalize: avoid signed division (rounding problems) */
	error = -ESRCH;
	if (niceval < MIN_NICE)
		niceval = MIN_NICE;
	if (niceval > MAX_NICE)
		niceval = MAX_NICE;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p)
			error = set_one_prio(p, niceval, error);
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			error = set_one_prio(p, niceval, error);
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p))
				error = set_one_prio(p, niceval, error);
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* For find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();
out:
	return error;
}

/*
 * Ugh. To avoid negative return values, "getpriority()" will
 * not return the normal nice-value, but a negated value that
 * has been offset by 20 (ie it returns 40..1 instead of -20..19)
 * to stay compatible.
 */
SYSCALL_DEFINE2(getpriority, int, which, int, who)
{
	struct task_struct *g, *p;
	struct user_struct *user;
	const struct cred *cred = current_cred();
	long niceval, retval = -ESRCH;
	struct pid *pgrp;
	kuid_t uid;

	if (which > PRIO_USER || which < PRIO_PROCESS)
		return -EINVAL;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	switch (which) {
	case PRIO_PROCESS:
		if (who)
			p = find_task_by_vpid(who);
		else
			p = current;
		if (p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		}
		break;
	case PRIO_PGRP:
		if (who)
			pgrp = find_vpid(who);
		else
			pgrp = task_pgrp(current);
		do_each_pid_thread(pgrp, PIDTYPE_PGID, p) {
			niceval = nice_to_rlimit(task_nice(p));
			if (niceval > retval)
				retval = niceval;
		} while_each_pid_thread(pgrp, PIDTYPE_PGID, p);
		break;
	case PRIO_USER:
		uid = make_kuid(cred->user_ns, who);
		user = cred->user;
		if (!who)
			uid = cred->uid;
		else if (!uid_eq(uid, cred->uid)) {
			user = find_user(uid);
			if (!user)
				goto out_unlock;	/* No processes for this user */
		}
		do_each_thread(g, p) {
			if (uid_eq(task_uid(p), uid) && task_pid_vnr(p)) {
				niceval = nice_to_rlimit(task_nice(p));
				if (niceval > retval)
					retval = niceval;
			}
		} while_each_thread(g, p);
		if (!uid_eq(uid, cred->uid))
			free_uid(user);		/* for find_user() */
		break;
	}
out_unlock:
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return retval;
}
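
/*
 * Illustrative userspace sketch (not part of the kernel): glibc's
 * getpriority() wrapper already undoes the 40..1 offsetting described
 * above, so only the raw syscall shows it.  The inverse of
 * nice_to_rlimit() is simply 20 - ret:
 *
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		long ret = syscall(SYS_getpriority, 0, 0); // PRIO_PROCESS, self
 *
 *		if (ret < 0)
 *			return 1;
 *		printf("raw=%ld nice=%ld\n", ret, 20 - ret);
 *		return 0;
 *	}
 */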

/*
 * Unprivileged users may change the real gid to the effective gid
 * or vice versa.  (BSD-style)
 *
 * If you set the real gid at all, or set the effective gid to a value not
 * equal to the real gid, then the saved gid is set to the new effective gid.
 *
 * This makes it possible for a setgid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setregid() will be
 * 100% compatible with BSD.  A program which uses just setgid() will be
 * 100% compatible with POSIX with saved IDs.
 *
 * SMP: There are no races, the GIDs are checked only by filesystem
 *      operations (as far as semantic preservation is concerned).
 */
#ifdef CONFIG_MULTIUSER
SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (rgid != (gid_t) -1) {
		if (gid_eq(old->gid, krgid) ||
		    gid_eq(old->egid, krgid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->gid = krgid;
		else
			goto error;
	}
	if (egid != (gid_t) -1) {
		if (gid_eq(old->gid, kegid) ||
		    gid_eq(old->egid, kegid) ||
		    gid_eq(old->sgid, kegid) ||
		    ns_capable(old->user_ns, CAP_SETGID))
			new->egid = kegid;
		else
			goto error;
	}

	if (rgid != (gid_t) -1 ||
	    (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
		new->sgid = new->egid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
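
/*
 * Illustrative userspace sketch (assumes a setgid binary whose real and
 * effective gids differ).  As described above, writing the real gid via
 * setregid() also rewrites the saved gid, so the drop is permanent:
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	void drop_group_privs(void)
 *	{
 *		gid_t rgid = getgid();
 *
 *		if (setregid(rgid, rgid) != 0)
 *			abort();	// still privileged: bail out
 *	}
 */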

/*
 * setgid() is implemented like SysV w/ SAVED_IDS
 *
 * SMP: Same implicit races as above.
 */
SYSCALL_DEFINE1(setgid, gid_t, gid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t kgid;

	kgid = make_kgid(ns, gid);
	if (!gid_valid(kgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETGID))
		new->gid = new->egid = new->sgid = new->fsgid = kgid;
	else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
		new->egid = new->fsgid = kgid;
	else
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * change the user struct in a credentials set to match the new UID
 */
static int set_user(struct cred *new)
{
	struct user_struct *new_user;

	new_user = alloc_uid(new->uid);
	if (!new_user)
		return -EAGAIN;

	/*
	 * We don't fail in case of NPROC limit excess here because too many
	 * poorly written programs don't check set*uid() return code, assuming
	 * it never fails if called by root.  We may still enforce NPROC limit
	 * for programs doing set*uid()+execve() by harmlessly deferring the
	 * failure to the execve() stage.
	 */
	if (atomic_read(&new_user->processes) >= rlimit(RLIMIT_NPROC) &&
			new_user != INIT_USER)
		current->flags |= PF_NPROC_EXCEEDED;
	else
		current->flags &= ~PF_NPROC_EXCEEDED;

	free_uid(new->user);
	new->user = new_user;
	return 0;
}

/*
 * Unprivileged users may change the real uid to the effective uid
 * or vice versa.  (BSD-style)
 *
 * If you set the real uid at all, or set the effective uid to a value not
 * equal to the real uid, then the saved uid is set to the new effective uid.
 *
 * This makes it possible for a setuid program to completely drop its
 * privileges, which is often a useful assertion to make when you are doing
 * a security audit over a program.
 *
 * The general idea is that a program which uses just setreuid() will be
 * 100% compatible with BSD.  A program which uses just setuid() will be
 * 100% compatible with POSIX with saved IDs.
 */
SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;
	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(old->uid, kruid) &&
		    !uid_eq(old->euid, kruid) &&
		    !ns_capable(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (euid != (uid_t) -1) {
		new->euid = keuid;
		if (!uid_eq(old->uid, keuid) &&
		    !uid_eq(old->euid, keuid) &&
		    !uid_eq(old->suid, keuid) &&
		    !ns_capable(old->user_ns, CAP_SETUID))
			goto error;
	}

	if (!uid_eq(new->uid, old->uid)) {
		retval = set_user(new);
		if (retval < 0)
			goto error;
	}
	if (ruid != (uid_t) -1 ||
	    (euid != (uid_t) -1 && !uid_eq(keuid, old->uid)))
		new->suid = new->euid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RE);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

/*
 * setuid() is implemented like SysV with SAVED_IDS
 *
 * Note that SAVED_IDS is deficient in that a setuid root program
 * like sendmail, for example, cannot set its uid to be a normal
 * user and then switch back, because if you're root, setuid() sets
 * the saved uid too.  If you don't like this, blame the bright people
 * in the POSIX committee and/or USG.  Note that the BSD-style setreuid()
 * will allow a root program to temporarily drop privileges and be able to
 * regain them by swapping the real and effective uid.
 */
SYSCALL_DEFINE1(setuid, uid_t, uid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kuid;

	kuid = make_kuid(ns, uid);
	if (!uid_valid(kuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (ns_capable(old->user_ns, CAP_SETUID)) {
		new->suid = new->uid = kuid;
		if (!uid_eq(kuid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	} else if (!uid_eq(kuid, old->uid) && !uid_eq(kuid, new->suid)) {
		goto error;
	}

	new->fsuid = new->euid = kuid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_ID);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
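
/*
 * Illustrative userspace sketch: with CAP_SETUID (e.g. a setuid-root
 * binary), the setuid() above writes the real, effective *and* saved
 * uid, so the drop below cannot be reversed; per the comment above, a
 * process that wants its privileges back must use setreuid()/seteuid()
 * instead:
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	void drop_to(uid_t uid)
 *	{
 *		if (setuid(uid) != 0)
 *			abort();
 *		if (setuid(0) == 0)	// must fail if the drop stuck
 *			abort();
 *	}
 */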

/*
 * This function implements a generic ability to update ruid, euid,
 * and suid.  This allows you to implement the 4.4 compatible seteuid().
 */
SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kuid_t kruid, keuid, ksuid;

	kruid = make_kuid(ns, ruid);
	keuid = make_kuid(ns, euid);
	ksuid = make_kuid(ns, suid);

	if ((ruid != (uid_t) -1) && !uid_valid(kruid))
		return -EINVAL;

	if ((euid != (uid_t) -1) && !uid_valid(keuid))
		return -EINVAL;

	if ((suid != (uid_t) -1) && !uid_valid(ksuid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;

	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETUID)) {
		if (ruid != (uid_t) -1        && !uid_eq(kruid, old->uid) &&
		    !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid))
			goto error;
		if (euid != (uid_t) -1        && !uid_eq(keuid, old->uid) &&
		    !uid_eq(keuid, old->euid) && !uid_eq(keuid, old->suid))
			goto error;
		if (suid != (uid_t) -1        && !uid_eq(ksuid, old->uid) &&
		    !uid_eq(ksuid, old->euid) && !uid_eq(ksuid, old->suid))
			goto error;
	}

	if (ruid != (uid_t) -1) {
		new->uid = kruid;
		if (!uid_eq(kruid, old->uid)) {
			retval = set_user(new);
			if (retval < 0)
				goto error;
		}
	}
	if (euid != (uid_t) -1)
		new->euid = keuid;
	if (suid != (uid_t) -1)
		new->suid = ksuid;
	new->fsuid = new->euid;

	retval = security_task_fix_setuid(new, old, LSM_SETID_RES);
	if (retval < 0)
		goto error;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}
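
/*
 * Illustrative userspace sketch: the 4.4BSD-compatible seteuid() the
 * comment above refers to is just a special case of setresuid(), with
 * (uid_t)-1 meaning "leave this id unchanged":
 *
 *	#include <unistd.h>
 *
 *	int my_seteuid(uid_t euid)
 *	{
 *		return setresuid((uid_t)-1, euid, (uid_t)-1);
 *	}
 */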

SYSCALL_DEFINE3(getresuid, uid_t __user *, ruidp, uid_t __user *, euidp, uid_t __user *, suidp)
{
	const struct cred *cred = current_cred();
	int retval;
	uid_t ruid, euid, suid;

	ruid = from_kuid_munged(cred->user_ns, cred->uid);
	euid = from_kuid_munged(cred->user_ns, cred->euid);
	suid = from_kuid_munged(cred->user_ns, cred->suid);

	retval = put_user(ruid, ruidp);
	if (!retval) {
		retval = put_user(euid, euidp);
		if (!retval)
			return put_user(suid, suidp);
	}
	return retval;
}

/*
 * Same as above, but for rgid, egid, sgid.
 */
SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
{
	struct user_namespace *ns = current_user_ns();
	const struct cred *old;
	struct cred *new;
	int retval;
	kgid_t krgid, kegid, ksgid;

	krgid = make_kgid(ns, rgid);
	kegid = make_kgid(ns, egid);
	ksgid = make_kgid(ns, sgid);

	if ((rgid != (gid_t) -1) && !gid_valid(krgid))
		return -EINVAL;
	if ((egid != (gid_t) -1) && !gid_valid(kegid))
		return -EINVAL;
	if ((sgid != (gid_t) -1) && !gid_valid(ksgid))
		return -EINVAL;

	new = prepare_creds();
	if (!new)
		return -ENOMEM;
	old = current_cred();

	retval = -EPERM;
	if (!ns_capable(old->user_ns, CAP_SETGID)) {
		if (rgid != (gid_t) -1        && !gid_eq(krgid, old->gid) &&
		    !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid))
			goto error;
		if (egid != (gid_t) -1        && !gid_eq(kegid, old->gid) &&
		    !gid_eq(kegid, old->egid) && !gid_eq(kegid, old->sgid))
			goto error;
		if (sgid != (gid_t) -1        && !gid_eq(ksgid, old->gid) &&
		    !gid_eq(ksgid, old->egid) && !gid_eq(ksgid, old->sgid))
			goto error;
	}

	if (rgid != (gid_t) -1)
		new->gid = krgid;
	if (egid != (gid_t) -1)
		new->egid = kegid;
	if (sgid != (gid_t) -1)
		new->sgid = ksgid;
	new->fsgid = new->egid;

	return commit_creds(new);

error:
	abort_creds(new);
	return retval;
}

SYSCALL_DEFINE3(getresgid, gid_t __user *, rgidp, gid_t __user *, egidp, gid_t __user *, sgidp)
{
	const struct cred *cred = current_cred();
	int retval;
	gid_t rgid, egid, sgid;

	rgid = from_kgid_munged(cred->user_ns, cred->gid);
	egid = from_kgid_munged(cred->user_ns, cred->egid);
	sgid = from_kgid_munged(cred->user_ns, cred->sgid);

	retval = put_user(rgid, rgidp);
	if (!retval) {
		retval = put_user(egid, egidp);
		if (!retval)
			retval = put_user(sgid, sgidp);
	}

	return retval;
}
745 * "setfsuid()" sets the fsuid - the uid used for filesystem checks. This
746 * is used for "access()" and for the NFS daemon (letting nfsd stay at
747 * whatever uid it wants to). It normally shadows "euid", except when
748 * explicitly set by setfsuid() or for access..
750 SYSCALL_DEFINE1(setfsuid, uid_t, uid)
752 const struct cred *old;
757 old = current_cred();
758 old_fsuid = from_kuid_munged(old->user_ns, old->fsuid);
760 kuid = make_kuid(old->user_ns, uid);
761 if (!uid_valid(kuid))
764 new = prepare_creds();
768 if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) ||
769 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
770 ns_capable(old->user_ns, CAP_SETUID)) {
771 if (!uid_eq(kuid, old->fsuid)) {
773 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)

/*
 * Same again ("Samma på svenska.."): setfsuid(), but for the fsgid.
 */
SYSCALL_DEFINE1(setfsgid, gid_t, gid)
{
	const struct cred *old;
	struct cred *new;
	gid_t old_fsgid;
	kgid_t kgid;

	old = current_cred();
	old_fsgid = from_kgid_munged(old->user_ns, old->fsgid);

	kgid = make_kgid(old->user_ns, gid);
	if (!gid_valid(kgid))
		return old_fsgid;

	new = prepare_creds();
	if (!new)
		return old_fsgid;

	if (gid_eq(kgid, old->gid)  || gid_eq(kgid, old->egid)  ||
	    gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
	    ns_capable(old->user_ns, CAP_SETGID)) {
		if (!gid_eq(kgid, old->fsgid)) {
			new->fsgid = kgid;
			goto change_okay;
		}
	}

	abort_creds(new);
	return old_fsgid;

change_okay:
	commit_creds(new);
	return old_fsgid;
}
#endif /* CONFIG_MULTIUSER */
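
/*
 * Illustrative userspace note: as the code above shows, setfsuid() and
 * setfsgid() return the *previous* fsuid/fsgid and never an error code,
 * so the only way to detect failure is to call again and compare:
 *
 *	#include <sys/fsuid.h>
 *
 *	int change_fsuid(uid_t uid)
 *	{
 *		setfsuid(uid);
 *		return (uid_t)setfsuid(uid) == uid ? 0 : -1;
 *	}
 */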

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}

void do_sys_times(struct tms *tms)
{
	u64 tgutime, tgstime, cutime, cstime;

	thread_group_cputime_adjusted(current, &tgutime, &tgstime);
	cutime = current->signal->cutime;
	cstime = current->signal->cstime;
	tms->tms_utime = nsec_to_clock_t(tgutime);
	tms->tms_stime = nsec_to_clock_t(tgstime);
	tms->tms_cutime = nsec_to_clock_t(cutime);
	tms->tms_cstime = nsec_to_clock_t(cstime);
}

SYSCALL_DEFINE1(times, struct tms __user *, tbuf)
{
	if (tbuf) {
		struct tms tmp;

		do_sys_times(&tmp);
		if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return (long) jiffies_64_to_clock_t(get_jiffies_64());
}
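
/*
 * Illustrative userspace sketch: both the returned tick count and the
 * struct tms fields filled by do_sys_times() are in clock_t units;
 * divide by sysconf(_SC_CLK_TCK) to get seconds:
 *
 *	#include <stdio.h>
 *	#include <sys/times.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct tms t;
 *		long hz = sysconf(_SC_CLK_TCK);
 *		clock_t uptime = times(&t);
 *
 *		printf("user %.2fs sys %.2fs\n",
 *		       (double)t.tms_utime / hz,
 *		       (double)t.tms_stime / hz);
 *		return uptime == (clock_t)-1;
 *	}
 */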

/*
 * This needs some heavy checking ...
 * I just haven't the stomach for it. I also don't fully
 * understand sessions/pgrp etc. Let somebody who does explain it.
 *
 * OK, I think I have the protection semantics right.... this is really
 * only important on a multi-user system anyway, to make sure one user
 * can't send a signal to a process owned by another.  -TYT, 12/12/91
 *
 * !PF_FORKNOEXEC check to conform completely to POSIX.
 */
SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
{
	struct task_struct *p;
	struct task_struct *group_leader = current->group_leader;
	struct pid *pgrp;
	int err;

	if (!pid)
		pid = task_pid_vnr(group_leader);
	if (!pgid)
		pgid = pid;
	if (pgid < 0)
		return -EINVAL;
	rcu_read_lock();

	/* From this point forward we keep holding onto the tasklist lock
	 * so that our parent does not change from under us. -DaveM
	 */
	write_lock_irq(&tasklist_lock);

	err = -ESRCH;
	p = find_task_by_vpid(pid);
	if (!p)
		goto out;

	err = -EINVAL;
	if (!thread_group_leader(p))
		goto out;

	if (same_thread_group(p->real_parent, group_leader)) {
		err = -EPERM;
		if (task_session(p) != task_session(group_leader))
			goto out;
		err = -EACCES;
		if (!(p->flags & PF_FORKNOEXEC))
			goto out;
	} else {
		err = -ESRCH;
		if (p != group_leader)
			goto out;
	}

	err = -EPERM;
	if (p->signal->leader)
		goto out;

	pgrp = task_pid(p);
	if (pgid != pid) {
		struct task_struct *g;

		pgrp = find_vpid(pgid);
		g = pid_task(pgrp, PIDTYPE_PGID);
		if (!g || task_session(g) != task_session(group_leader))
			goto out;
	}

	err = security_task_setpgid(p, pgid);
	if (err)
		goto out;

	if (task_pgrp(p) != pgrp)
		change_pid(p, PIDTYPE_PGID, pgrp);

	err = 0;
out:
	/* All paths lead to here, thus we are safe. -DaveM */
	write_unlock_irq(&tasklist_lock);
	rcu_read_unlock();
	return err;
}

SYSCALL_DEFINE1(getpgid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *grp;
	int retval;

	rcu_read_lock();
	if (!pid)
		grp = task_pgrp(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		grp = task_pgrp(p);
		if (!grp)
			goto out;

		retval = security_task_getpgid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(grp);
out:
	rcu_read_unlock();
	return retval;
}

#ifdef __ARCH_WANT_SYS_GETPGRP

SYSCALL_DEFINE0(getpgrp)
{
	return sys_getpgid(0);
}

#endif

SYSCALL_DEFINE1(getsid, pid_t, pid)
{
	struct task_struct *p;
	struct pid *sid;
	int retval;

	rcu_read_lock();
	if (!pid)
		sid = task_session(current);
	else {
		retval = -ESRCH;
		p = find_task_by_vpid(pid);
		if (!p)
			goto out;
		sid = task_session(p);
		if (!sid)
			goto out;

		retval = security_task_getsid(p);
		if (retval)
			goto out;
	}
	retval = pid_vnr(sid);
out:
	rcu_read_unlock();
	return retval;
}

static void set_special_pids(struct pid *pid)
{
	struct task_struct *curr = current->group_leader;

	if (task_session(curr) != pid)
		change_pid(curr, PIDTYPE_SID, pid);

	if (task_pgrp(curr) != pid)
		change_pid(curr, PIDTYPE_PGID, pid);
}

SYSCALL_DEFINE0(setsid)
{
	struct task_struct *group_leader = current->group_leader;
	struct pid *sid = task_pid(group_leader);
	pid_t session = pid_vnr(sid);
	int err = -EPERM;

	write_lock_irq(&tasklist_lock);
	/* Fail if I am already a session leader */
	if (group_leader->signal->leader)
		goto out;

	/* Fail if a process group id already exists that equals the
	 * proposed session id.
	 */
	if (pid_task(sid, PIDTYPE_PGID))
		goto out;

	group_leader->signal->leader = 1;
	set_special_pids(sid);

	proc_clear_tty(group_leader);

	err = session;
out:
	write_unlock_irq(&tasklist_lock);
	if (err > 0) {
		proc_sid_connector(group_leader);
		sched_autogroup_create_attach(group_leader);
	}
	return err;
}
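
/*
 * Illustrative userspace sketch: the classic use of setsid().  It fails
 * with -EPERM for a process-group leader (see the check above), so a
 * daemon forks first to guarantee the child is not a leader:
 *
 *	#include <stdlib.h>
 *	#include <unistd.h>
 *
 *	void daemonize(void)
 *	{
 *		pid_t pid = fork();
 *
 *		if (pid < 0)
 *			exit(1);
 *		if (pid > 0)
 *			exit(0);	// parent goes away
 *		if (setsid() < 0)	// child becomes session leader
 *			exit(1);
 *	}
 */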

DECLARE_RWSEM(uts_sem);

#ifdef COMPAT_UTS_MACHINE
#define override_architecture(name) \
	(personality(current->personality) == PER_LINUX32 && \
	 copy_to_user(name->machine, COMPAT_UTS_MACHINE, \
		      sizeof(COMPAT_UTS_MACHINE)))
#else
#define override_architecture(name)	0
#endif

/*
 * Work around broken programs that cannot handle "Linux 3.0".
 * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40.
 * And we map 4.x to 2.6.60+x, so 4.0 would be 2.6.60.
 */
static int override_release(char __user *release, size_t len)
{
	int ret = 0;

	if (current->personality & UNAME26) {
		const char *rest = UTS_RELEASE;
		char buf[65] = { 0 };
		int ndots = 0;
		unsigned v;
		size_t copy;

		while (*rest) {
			if (*rest == '.' && ++ndots >= 3)
				break;
			if (!isdigit(*rest) && *rest != '.')
				break;
			rest++;
		}
		v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 60;
		copy = clamp_t(size_t, len, 1, sizeof(buf));
		copy = scnprintf(buf, copy, "2.6.%u%s", v, rest);
		ret = copy_to_user(release, buf, copy + 1);
	}
	return ret;
}
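
/*
 * Illustrative userspace sketch (Linux-specific; assumes the UNAME26
 * constant from <sys/personality.h>): the mapping above can be observed
 * by flipping the personality bit before calling uname():
 *
 *	#include <stdio.h>
 *	#include <sys/personality.h>
 *	#include <sys/utsname.h>
 *
 *	int main(void)
 *	{
 *		struct utsname u;
 *
 *		personality(personality(0xffffffff) | UNAME26);
 *		uname(&u);
 *		printf("%s\n", u.release);	// e.g. "2.6.60" on a 4.0 kernel
 *		return 0;
 *	}
 */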

SYSCALL_DEFINE1(newuname, struct new_utsname __user *, name)
{
	int errno = 0;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof *name))
		errno = -EFAULT;
	up_read(&uts_sem);

	if (!errno && override_release(name->release, sizeof(name->release)))
		errno = -EFAULT;
	if (!errno && override_architecture(name))
		errno = -EFAULT;
	return errno;
}

#ifdef __ARCH_WANT_SYS_OLD_UNAME
/*
 * Old cruft
 */
SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
{
	int error = 0;

	if (!name)
		return -EFAULT;

	down_read(&uts_sem);
	if (copy_to_user(name, utsname(), sizeof(*name)))
		error = -EFAULT;
	up_read(&uts_sem);

	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	if (!error && override_architecture(name))
		error = -EFAULT;
	return error;
}

SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
{
	int error;

	if (!name)
		return -EFAULT;
	if (!access_ok(VERIFY_WRITE, name, sizeof(struct oldold_utsname)))
		return -EFAULT;

	down_read(&uts_sem);
	error = __copy_to_user(&name->sysname, &utsname()->sysname,
			       __OLD_UTS_LEN);
	error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->nodename, &utsname()->nodename,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->release, &utsname()->release,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->release + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->version, &utsname()->version,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->version + __OLD_UTS_LEN);
	error |= __copy_to_user(&name->machine, &utsname()->machine,
				__OLD_UTS_LEN);
	error |= __put_user(0, name->machine + __OLD_UTS_LEN);
	up_read(&uts_sem);

	if (!error && override_architecture(name))
		error = -EFAULT;
	if (!error && override_release(name->release, sizeof(name->release)))
		error = -EFAULT;
	return error ? -EFAULT : 0;
}
#endif

SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;

	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;
	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->nodename, tmp, len);
		memset(u->nodename + len, 0, sizeof(u->nodename) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_HOSTNAME);
	}
	up_write(&uts_sem);
	return errno;
}

#ifdef __ARCH_WANT_SYS_GETHOSTNAME

SYSCALL_DEFINE2(gethostname, char __user *, name, int, len)
{
	int i, errno;
	struct new_utsname *u;

	if (len < 0)
		return -EINVAL;
	down_read(&uts_sem);
	u = utsname();
	i = 1 + strlen(u->nodename);
	if (i > len)
		i = len;
	errno = 0;
	if (copy_to_user(name, u->nodename, i))
		errno = -EFAULT;
	up_read(&uts_sem);
	return errno;
}

#endif

/*
 * Only setdomainname; getdomainname can be implemented by calling
 * uname()
 */
SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
{
	int errno;
	char tmp[__NEW_UTS_LEN];

	if (!ns_capable(current->nsproxy->uts_ns->user_ns, CAP_SYS_ADMIN))
		return -EPERM;
	if (len < 0 || len > __NEW_UTS_LEN)
		return -EINVAL;

	down_write(&uts_sem);
	errno = -EFAULT;
	if (!copy_from_user(tmp, name, len)) {
		struct new_utsname *u = utsname();

		memcpy(u->domainname, tmp, len);
		memset(u->domainname + len, 0, sizeof(u->domainname) - len);
		errno = 0;
		uts_proc_notify(UTS_PROC_DOMAINNAME);
	}
	up_write(&uts_sem);
	return errno;
}

SYSCALL_DEFINE2(getrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit value;
	int ret;

	ret = do_prlimit(current, resource, NULL, &value);
	if (!ret)
		ret = copy_to_user(rlim, &value, sizeof(*rlim)) ? -EFAULT : 0;

	return ret;
}

#ifdef __ARCH_WANT_SYS_OLD_GETRLIMIT

/*
 *	Back compatibility for getrlimit. Needed for some apps.
 */
SYSCALL_DEFINE2(old_getrlimit, unsigned int, resource,
		struct rlimit __user *, rlim)
{
	struct rlimit x;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;

	task_lock(current->group_leader);
	x = current->signal->rlim[resource];
	task_unlock(current->group_leader);
	if (x.rlim_cur > 0x7FFFFFFF)
		x.rlim_cur = 0x7FFFFFFF;
	if (x.rlim_max > 0x7FFFFFFF)
		x.rlim_max = 0x7FFFFFFF;
	return copy_to_user(rlim, &x, sizeof(x)) ? -EFAULT : 0;
}

#endif

static inline bool rlim64_is_infinity(__u64 rlim64)
{
#if BITS_PER_LONG < 64
	return rlim64 >= ULONG_MAX;
#else
	return rlim64 == RLIM64_INFINITY;
#endif
}

static void rlim_to_rlim64(const struct rlimit *rlim, struct rlimit64 *rlim64)
{
	if (rlim->rlim_cur == RLIM_INFINITY)
		rlim64->rlim_cur = RLIM64_INFINITY;
	else
		rlim64->rlim_cur = rlim->rlim_cur;
	if (rlim->rlim_max == RLIM_INFINITY)
		rlim64->rlim_max = RLIM64_INFINITY;
	else
		rlim64->rlim_max = rlim->rlim_max;
}

static void rlim64_to_rlim(const struct rlimit64 *rlim64, struct rlimit *rlim)
{
	if (rlim64_is_infinity(rlim64->rlim_cur))
		rlim->rlim_cur = RLIM_INFINITY;
	else
		rlim->rlim_cur = (unsigned long)rlim64->rlim_cur;
	if (rlim64_is_infinity(rlim64->rlim_max))
		rlim->rlim_max = RLIM_INFINITY;
	else
		rlim->rlim_max = (unsigned long)rlim64->rlim_max;
}

/* make sure you are allowed to change @tsk limits before calling this */
int do_prlimit(struct task_struct *tsk, unsigned int resource,
		struct rlimit *new_rlim, struct rlimit *old_rlim)
{
	struct rlimit *rlim;
	int retval = 0;

	if (resource >= RLIM_NLIMITS)
		return -EINVAL;
	if (new_rlim) {
		if (new_rlim->rlim_cur > new_rlim->rlim_max)
			return -EINVAL;
		if (resource == RLIMIT_NOFILE &&
				new_rlim->rlim_max > sysctl_nr_open)
			return -EPERM;
	}

	/* protect tsk->signal and tsk->sighand from disappearing */
	read_lock(&tasklist_lock);
	if (!tsk->sighand) {
		retval = -ESRCH;
		goto out;
	}

	rlim = tsk->signal->rlim + resource;
	task_lock(tsk->group_leader);
	if (new_rlim) {
		/* Keep the capable check against init_user_ns until
		   cgroups can contain all limits */
		if (new_rlim->rlim_max > rlim->rlim_max &&
				!capable(CAP_SYS_RESOURCE))
			retval = -EPERM;
		if (!retval)
			retval = security_task_setrlimit(tsk->group_leader,
					resource, new_rlim);
		if (resource == RLIMIT_CPU && new_rlim->rlim_cur == 0) {
			/*
			 * The caller is asking for an immediate RLIMIT_CPU
			 * expiry.  But we use the zero value to mean "it was
			 * never set".  So let's cheat and make it one second
			 * instead
			 */
			new_rlim->rlim_cur = 1;
		}
	}
	if (!retval) {
		if (old_rlim)
			*old_rlim = *rlim;
		if (new_rlim)
			*rlim = *new_rlim;
	}
	task_unlock(tsk->group_leader);

	/*
	 * RLIMIT_CPU handling.  Note that the kernel fails to return an error
	 * code if it rejected the user's attempt to set RLIMIT_CPU.  This is a
	 * very long-standing error, and fixing it now risks breakage of
	 * applications, so we live with it
	 */
	if (!retval && new_rlim && resource == RLIMIT_CPU &&
	    new_rlim->rlim_cur != RLIM_INFINITY &&
	    IS_ENABLED(CONFIG_POSIX_TIMERS))
		update_rlimit_cpu(tsk, new_rlim->rlim_cur);
out:
	read_unlock(&tasklist_lock);
	return retval;
}

/* rcu lock must be held */
static int check_prlimit_permission(struct task_struct *task)
{
	const struct cred *cred = current_cred(), *tcred;

	if (current == task)
		return 0;

	tcred = __task_cred(task);
	if (uid_eq(cred->uid, tcred->euid) &&
	    uid_eq(cred->uid, tcred->suid) &&
	    uid_eq(cred->uid, tcred->uid)  &&
	    gid_eq(cred->gid, tcred->egid) &&
	    gid_eq(cred->gid, tcred->sgid) &&
	    gid_eq(cred->gid, tcred->gid))
		return 0;
	if (ns_capable(tcred->user_ns, CAP_SYS_RESOURCE))
		return 0;

	return -EPERM;
}

SYSCALL_DEFINE4(prlimit64, pid_t, pid, unsigned int, resource,
		const struct rlimit64 __user *, new_rlim,
		struct rlimit64 __user *, old_rlim)
{
	struct rlimit64 old64, new64;
	struct rlimit old, new;
	struct task_struct *tsk;
	int ret;

	if (new_rlim) {
		if (copy_from_user(&new64, new_rlim, sizeof(new64)))
			return -EFAULT;
		rlim64_to_rlim(&new64, &new);
	}

	rcu_read_lock();
	tsk = pid ? find_task_by_vpid(pid) : current;
	if (!tsk) {
		rcu_read_unlock();
		return -ESRCH;
	}
	ret = check_prlimit_permission(tsk);
	if (ret) {
		rcu_read_unlock();
		return ret;
	}
	get_task_struct(tsk);
	rcu_read_unlock();

	ret = do_prlimit(tsk, resource, new_rlim ? &new : NULL,
			old_rlim ? &old : NULL);

	if (!ret && old_rlim) {
		rlim_to_rlim64(&old, &old64);
		if (copy_to_user(old_rlim, &old64, sizeof(old64)))
			ret = -EFAULT;
	}

	put_task_struct(tsk);
	return ret;
}

SYSCALL_DEFINE2(setrlimit, unsigned int, resource, struct rlimit __user *, rlim)
{
	struct rlimit new_rlim;

	if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
		return -EFAULT;
	return do_prlimit(current, resource, &new_rlim, NULL);
}
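
/*
 * Illustrative userspace sketch: glibc exposes the prlimit64 syscall
 * above as prlimit(2); a NULL new limit reads, a non-NULL one writes,
 * and both may be combined in a single call:
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int bump_nofile(pid_t pid, rlim_t cur)
 *	{
 *		struct rlimit new = { .rlim_cur = cur, .rlim_max = cur };
 *		struct rlimit old;
 *
 *		if (prlimit(pid, RLIMIT_NOFILE, &new, &old) != 0)
 *			return -1;
 *		printf("was %llu/%llu\n",
 *		       (unsigned long long)old.rlim_cur,
 *		       (unsigned long long)old.rlim_max);
 *		return 0;
 *	}
 */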

/*
 * It would make sense to put struct rusage in the task_struct,
 * except that would make the task_struct be *really big*.  After
 * task_struct gets moved into malloc'ed memory, it would
 * make sense to do this.  It will make moving the rest of the information
 * a lot simpler!  (Which we're not doing right now because we're not
 * measuring them yet).
 *
 * When sampling multiple threads for RUSAGE_SELF, under SMP we might have
 * races with threads incrementing their own counters.  But since word
 * reads are atomic, we either get new values or old values and we don't
 * care which for the sums.  We always take the siglock to protect reading
 * the c* fields from p->signal from races with exit.c updating those
 * fields when reaping, so a sample either gets all the additions of a
 * given child after it's reaped, or none so this sample is before reaping.
 *
 * Locking:
 * We need to take the siglock for CHILDREN, SELF and BOTH
 * for the cases current multithreaded, non-current single threaded and
 * non-current multithreaded.  Thread traversal is now safe with
 * the siglock held.
 * Strictly speaking, we do not need to take the siglock if we are current and
 * single threaded, as no one else can take our signal_struct away, no one
 * else can reap the children to update signal->c* counters, and no one else
 * can race with the signal-> fields.  If we do not take any lock, the
 * signal-> fields could be read out of order while another thread was just
 * exiting.  So we should place a read memory barrier when we avoid the lock.
 * On the writer side, a write memory barrier is implied in __exit_signal,
 * as __exit_signal releases the siglock spinlock after updating the signal->
 * fields.  But we don't do this yet to keep things simple.
 */

static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
{
	r->ru_nvcsw += t->nvcsw;
	r->ru_nivcsw += t->nivcsw;
	r->ru_minflt += t->min_flt;
	r->ru_majflt += t->maj_flt;
	r->ru_inblock += task_io_get_inblock(t);
	r->ru_oublock += task_io_get_oublock(t);
}

static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
{
	struct task_struct *t;
	unsigned long flags;
	u64 tgutime, tgstime, utime, stime;
	unsigned long maxrss = 0;

	memset((char *)r, 0, sizeof (*r));
	utime = stime = 0;

	if (who == RUSAGE_THREAD) {
		task_cputime_adjusted(current, &utime, &stime);
		accumulate_thread_rusage(p, r);
		maxrss = p->signal->maxrss;
		goto out;
	}

	if (!lock_task_sighand(p, &flags))
		return;

	switch (who) {
	case RUSAGE_BOTH:
	case RUSAGE_CHILDREN:
		utime = p->signal->cutime;
		stime = p->signal->cstime;
		r->ru_nvcsw = p->signal->cnvcsw;
		r->ru_nivcsw = p->signal->cnivcsw;
		r->ru_minflt = p->signal->cmin_flt;
		r->ru_majflt = p->signal->cmaj_flt;
		r->ru_inblock = p->signal->cinblock;
		r->ru_oublock = p->signal->coublock;
		maxrss = p->signal->cmaxrss;

		if (who == RUSAGE_CHILDREN)
			break;

	case RUSAGE_SELF:
		thread_group_cputime_adjusted(p, &tgutime, &tgstime);
		utime += tgutime;
		stime += tgstime;
		r->ru_nvcsw += p->signal->nvcsw;
		r->ru_nivcsw += p->signal->nivcsw;
		r->ru_minflt += p->signal->min_flt;
		r->ru_majflt += p->signal->maj_flt;
		r->ru_inblock += p->signal->inblock;
		r->ru_oublock += p->signal->oublock;
		if (maxrss < p->signal->maxrss)
			maxrss = p->signal->maxrss;
		t = p;
		do {
			accumulate_thread_rusage(t, r);
		} while_each_thread(p, t);
		break;

	default:
		BUG();
	}
	unlock_task_sighand(p, &flags);

out:
	r->ru_utime = ns_to_timeval(utime);
	r->ru_stime = ns_to_timeval(stime);

	if (who != RUSAGE_CHILDREN) {
		struct mm_struct *mm = get_task_mm(p);

		if (mm) {
			setmax_mm_hiwater_rss(&maxrss, mm);
			mmput(mm);
		}
	}
	r->ru_maxrss = maxrss * (PAGE_SIZE / 1024); /* convert pages to KBs */
}

int getrusage(struct task_struct *p, int who, struct rusage __user *ru)
{
	struct rusage r;

	k_getrusage(p, who, &r);
	return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
}

SYSCALL_DEFINE2(getrusage, int, who, struct rusage __user *, ru)
{
	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	return getrusage(current, who, ru);
}

#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE2(getrusage, int, who, struct compat_rusage __user *, ru)
{
	struct rusage r;

	if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN &&
	    who != RUSAGE_THREAD)
		return -EINVAL;

	k_getrusage(current, who, &r);
	return put_compat_rusage(&r, ru);
}
#endif
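
/*
 * Illustrative userspace sketch: note that k_getrusage() above fills
 * ru_maxrss in kilobytes (pages * PAGE_SIZE / 1024):
 *
 *	#include <stdio.h>
 *	#include <sys/resource.h>
 *
 *	int main(void)
 *	{
 *		struct rusage ru;
 *
 *		if (getrusage(RUSAGE_SELF, &ru) != 0)
 *			return 1;
 *		printf("peak RSS: %ld kB\n", ru.ru_maxrss);
 *		return 0;
 *	}
 */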

SYSCALL_DEFINE1(umask, int, mask)
{
	mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
	return mask;
}
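
/*
 * Illustrative userspace note: as the xchg() above suggests, umask(2)
 * always succeeds and returns the previous mask, so reading the mask
 * without changing it takes two calls:
 *
 *	#include <sys/stat.h>
 *
 *	mode_t read_umask(void)
 *	{
 *		mode_t old = umask(0);
 *
 *		umask(old);
 *		return old;
 *	}
 */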

static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
{
	struct fd exe;
	struct file *old_exe, *exe_file;
	struct inode *inode;
	int err;

	exe = fdget(fd);
	if (!exe.file)
		return -EBADF;

	inode = file_inode(exe.file);

	/*
	 * Because the original mm->exe_file points to executable file, make
	 * sure that this one is executable as well, to avoid breaking an
	 * overall picture.
	 */
	err = -EACCES;
	if (!S_ISREG(inode->i_mode) || path_noexec(&exe.file->f_path))
		goto exit;

	err = inode_permission(inode, MAY_EXEC);
	if (err)
		goto exit;

	/*
	 * Forbid mm->exe_file change if old file still mapped.
	 */
	exe_file = get_mm_exe_file(mm);
	err = -EBUSY;
	if (exe_file) {
		struct vm_area_struct *vma;

		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (!vma->vm_file)
				continue;
			if (path_equal(&vma->vm_file->f_path,
				       &exe_file->f_path))
				goto exit_err;
		}

		up_read(&mm->mmap_sem);
		fput(exe_file);
	}

	/* set the new file, lockless */
	get_file(exe.file);
	old_exe = xchg(&mm->exe_file, exe.file);
	if (old_exe)
		fput(old_exe);
	err = 0;
exit:
	fdput(exe);
	return err;
exit_err:
	up_read(&mm->mmap_sem);
	fput(exe_file);
	goto exit;
}

/*
 * WARNING: we don't require any capability here so be very careful
 * in what is allowed for modification from userspace.
 */
static int validate_prctl_map(struct prctl_mm_map *prctl_map)
{
	unsigned long mmap_max_addr = TASK_SIZE;
	struct mm_struct *mm = current->mm;
	int error = -EINVAL, i;

	static const unsigned char offsets[] = {
		offsetof(struct prctl_mm_map, start_code),
		offsetof(struct prctl_mm_map, end_code),
		offsetof(struct prctl_mm_map, start_data),
		offsetof(struct prctl_mm_map, end_data),
		offsetof(struct prctl_mm_map, start_brk),
		offsetof(struct prctl_mm_map, brk),
		offsetof(struct prctl_mm_map, start_stack),
		offsetof(struct prctl_mm_map, arg_start),
		offsetof(struct prctl_mm_map, arg_end),
		offsetof(struct prctl_mm_map, env_start),
		offsetof(struct prctl_mm_map, env_end),
	};

	/*
	 * Make sure the members are not somewhere outside
	 * of allowed address space.
	 */
	for (i = 0; i < ARRAY_SIZE(offsets); i++) {
		u64 val = *(u64 *)((char *)prctl_map + offsets[i]);

		if ((unsigned long)val >= mmap_max_addr ||
		    (unsigned long)val < mmap_min_addr)
			goto out;
	}

	/*
	 * Make sure the pairs are ordered.
	 */
#define __prctl_check_order(__m1, __op, __m2)				\
	((unsigned long)prctl_map->__m1 __op				\
	 (unsigned long)prctl_map->__m2) ? 0 : -EINVAL
	error  = __prctl_check_order(start_code, <, end_code);
	error |= __prctl_check_order(start_data, <, end_data);
	error |= __prctl_check_order(start_brk, <=, brk);
	error |= __prctl_check_order(arg_start, <=, arg_end);
	error |= __prctl_check_order(env_start, <=, env_end);
	if (error)
		goto out;
#undef __prctl_check_order

	error = -EINVAL;

	/*
	 * @brk should be after @end_data in traditional maps.
	 */
	if (prctl_map->start_brk <= prctl_map->end_data ||
	    prctl_map->brk <= prctl_map->end_data)
		goto out;

	/*
	 * Nor should we allow overriding the limits if they are set.
	 */
	if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk,
			      prctl_map->start_brk, prctl_map->end_data,
			      prctl_map->start_data))
		goto out;

	/*
	 * Someone is trying to cheat the auxv vector.
	 */
	if (prctl_map->auxv_size) {
		if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv))
			goto out;
	}

	/*
	 * Finally, make sure the caller has the rights to
	 * change /proc/pid/exe link: only local root should
	 * be allowed to.
	 */
	if (prctl_map->exe_fd != (u32)-1) {
		struct user_namespace *ns = current_user_ns();
		const struct cred *cred = current_cred();

		if (!uid_eq(cred->uid, make_kuid(ns, 0)) ||
		    !gid_eq(cred->gid, make_kgid(ns, 0)))
			goto out;
	}

	error = 0;
out:
	return error;
}

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size)
{
	struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, };
	unsigned long user_auxv[AT_VECTOR_SIZE];
	struct mm_struct *mm = current->mm;
	int error;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
	BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256);

	if (opt == PR_SET_MM_MAP_SIZE)
		return put_user((unsigned int)sizeof(prctl_map),
				(unsigned int __user *)addr);

	if (data_size != sizeof(prctl_map))
		return -EINVAL;

	if (copy_from_user(&prctl_map, addr, sizeof(prctl_map)))
		return -EFAULT;

	error = validate_prctl_map(&prctl_map);
	if (error)
		return error;

	if (prctl_map.auxv_size) {
		memset(user_auxv, 0, sizeof(user_auxv));
		if (copy_from_user(user_auxv,
				   (const void __user *)prctl_map.auxv,
				   prctl_map.auxv_size))
			return -EFAULT;

		/* Last entry must be AT_NULL as specification requires */
		user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL;
		user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL;
	}

	if (prctl_map.exe_fd != (u32)-1) {
		error = prctl_set_mm_exe_file(mm, prctl_map.exe_fd);
		if (error)
			return error;
	}

	down_write(&mm->mmap_sem);

	/*
	 * We don't validate if these members are pointing to
	 * real present VMAs because application may have the
	 * corresponding VMAs already unmapped, and the kernel uses these
	 * members mostly for statistics output in procfs, except
	 *
	 *  - @start_brk/@brk which are used in do_brk, but the kernel looks up
	 *    VMAs when updating these members, so anything wrong written
	 *    here causes the kernel to swear at the userspace program but
	 *    won't lead to any problem in the kernel itself
	 */

	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;

	/*
	 * Note this update of @saved_auxv is lockless thus
	 * if someone reads this member in procfs while we're
	 * updating -- it may get partly updated results. It's
	 * known and acceptable trade off: we leave it as is to
	 * not introduce additional locks here making the kernel
	 * more complex.
	 */
	if (prctl_map.auxv_size)
		memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv));

	up_write(&mm->mmap_sem);
	return 0;
}
#endif /* CONFIG_CHECKPOINT_RESTORE */

static int prctl_set_auxv(struct mm_struct *mm, unsigned long addr,
			  unsigned long len)
{
	/*
	 * This doesn't move the auxiliary vector itself since it's pinned to
	 * mm_struct, but it permits filling the vector with new values.  It's
	 * up to the caller to provide sane values here, otherwise userspace
	 * tools which use this vector might be unhappy.
	 */
	unsigned long user_auxv[AT_VECTOR_SIZE];

	if (len > sizeof(user_auxv))
		return -EINVAL;

	if (copy_from_user(user_auxv, (const void __user *)addr, len))
		return -EFAULT;

	/* Make sure the last entry is always AT_NULL */
	user_auxv[AT_VECTOR_SIZE - 2] = 0;
	user_auxv[AT_VECTOR_SIZE - 1] = 0;

	BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));

	task_lock(current);
	memcpy(mm->saved_auxv, user_auxv, len);
	task_unlock(current);

	return 0;
}

static int prctl_set_mm(int opt, unsigned long addr,
			unsigned long arg4, unsigned long arg5)
{
	struct mm_struct *mm = current->mm;
	struct prctl_mm_map prctl_map;
	struct vm_area_struct *vma;
	int error;

	if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV &&
			      opt != PR_SET_MM_MAP &&
			      opt != PR_SET_MM_MAP_SIZE)))
		return -EINVAL;

#ifdef CONFIG_CHECKPOINT_RESTORE
	if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE)
		return prctl_set_mm_map(opt, (const void __user *)addr, arg4);
#endif

	if (!capable(CAP_SYS_RESOURCE))
		return -EPERM;

	if (opt == PR_SET_MM_EXE_FILE)
		return prctl_set_mm_exe_file(mm, (unsigned int)addr);

	if (opt == PR_SET_MM_AUXV)
		return prctl_set_auxv(mm, addr, arg4);

	if (addr >= TASK_SIZE || addr < mmap_min_addr)
		return -EINVAL;

	error = -EINVAL;

	down_write(&mm->mmap_sem);
	vma = find_vma(mm, addr);

	prctl_map.start_code	= mm->start_code;
	prctl_map.end_code	= mm->end_code;
	prctl_map.start_data	= mm->start_data;
	prctl_map.end_data	= mm->end_data;
	prctl_map.start_brk	= mm->start_brk;
	prctl_map.brk		= mm->brk;
	prctl_map.start_stack	= mm->start_stack;
	prctl_map.arg_start	= mm->arg_start;
	prctl_map.arg_end	= mm->arg_end;
	prctl_map.env_start	= mm->env_start;
	prctl_map.env_end	= mm->env_end;
	prctl_map.auxv		= NULL;
	prctl_map.auxv_size	= 0;
	prctl_map.exe_fd	= -1;

	switch (opt) {
	case PR_SET_MM_START_CODE:
		prctl_map.start_code = addr;
		break;
	case PR_SET_MM_END_CODE:
		prctl_map.end_code = addr;
		break;
	case PR_SET_MM_START_DATA:
		prctl_map.start_data = addr;
		break;
	case PR_SET_MM_END_DATA:
		prctl_map.end_data = addr;
		break;
	case PR_SET_MM_START_STACK:
		prctl_map.start_stack = addr;
		break;
	case PR_SET_MM_START_BRK:
		prctl_map.start_brk = addr;
		break;
	case PR_SET_MM_BRK:
		prctl_map.brk = addr;
		break;
	case PR_SET_MM_ARG_START:
		prctl_map.arg_start = addr;
		break;
	case PR_SET_MM_ARG_END:
		prctl_map.arg_end = addr;
		break;
	case PR_SET_MM_ENV_START:
		prctl_map.env_start = addr;
		break;
	case PR_SET_MM_ENV_END:
		prctl_map.env_end = addr;
		break;
	default:
		goto out;
	}

	error = validate_prctl_map(&prctl_map);
	if (error)
		goto out;

	switch (opt) {
	/*
	 * If command line arguments and environment
	 * are placed somewhere else on stack, we can
	 * set them up here, ARG_START/END to setup
	 * command line arguments and ENV_START/END
	 * for environment variables.
	 */
	case PR_SET_MM_START_STACK:
	case PR_SET_MM_ARG_START:
	case PR_SET_MM_ARG_END:
	case PR_SET_MM_ENV_START:
	case PR_SET_MM_ENV_END:
		if (!vma) {
			error = -EFAULT;
			goto out;
		}
	}

	mm->start_code	= prctl_map.start_code;
	mm->end_code	= prctl_map.end_code;
	mm->start_data	= prctl_map.start_data;
	mm->end_data	= prctl_map.end_data;
	mm->start_brk	= prctl_map.start_brk;
	mm->brk		= prctl_map.brk;
	mm->start_stack	= prctl_map.start_stack;
	mm->arg_start	= prctl_map.arg_start;
	mm->arg_end	= prctl_map.arg_end;
	mm->env_start	= prctl_map.env_start;
	mm->env_end	= prctl_map.env_end;

	error = 0;
out:
	up_write(&mm->mmap_sem);
	return error;
}
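
/*
 * Illustrative userspace sketch (checkpoint/restore style; requires the
 * privileges checked above): callers are expected to probe the kernel's
 * idea of the structure size first, then submit the whole map in one
 * PR_SET_MM_MAP call:
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int set_mm_map(struct prctl_mm_map *map)
 *	{
 *		unsigned int size;
 *
 *		if (prctl(PR_SET_MM, PR_SET_MM_MAP_SIZE, &size, 0, 0))
 *			return -1;
 *		if (size != sizeof(*map))
 *			return -1;	// ABI mismatch, bail out
 *		return prctl(PR_SET_MM, PR_SET_MM_MAP, map, sizeof(*map), 0);
 *	}
 */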

#ifdef CONFIG_CHECKPOINT_RESTORE
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return put_user(me->clear_child_tid, tid_addr);
}
#else
static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
{
	return -EINVAL;
}
#endif

static int propagate_has_child_subreaper(struct task_struct *p, void *data)
{
	/*
	 * If the task already has has_child_subreaper - all its descendants
	 * already have this flag too and new descendants will
	 * inherit it on fork, so skip them.
	 *
	 * If we've found child_reaper - skip descendants in
	 * its subtree as they will never get out of the pidns.
	 */
	if (p->signal->has_child_subreaper ||
	    is_child_reaper(task_pid(p)))
		return 0;

	p->signal->has_child_subreaper = 1;
	return 1;
}

SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
		unsigned long, arg4, unsigned long, arg5)
{
	struct task_struct *me = current;
	unsigned char comm[sizeof(me->comm)];
	long error;

	error = security_task_prctl(option, arg2, arg3, arg4, arg5);
	if (error != -ENOSYS)
		return error;

	error = 0;
	switch (option) {
	case PR_SET_PDEATHSIG:
		if (!valid_signal(arg2)) {
			error = -EINVAL;
			break;
		}
		me->pdeath_signal = arg2;
		break;
	case PR_GET_PDEATHSIG:
		error = put_user(me->pdeath_signal, (int __user *)arg2);
		break;
	case PR_GET_DUMPABLE:
		error = get_dumpable(me->mm);
		break;
	case PR_SET_DUMPABLE:
		if (arg2 != SUID_DUMP_DISABLE && arg2 != SUID_DUMP_USER) {
			error = -EINVAL;
			break;
		}
		set_dumpable(me->mm, arg2);
		break;

	case PR_SET_UNALIGN:
		error = SET_UNALIGN_CTL(me, arg2);
		break;
	case PR_GET_UNALIGN:
		error = GET_UNALIGN_CTL(me, arg2);
		break;
	case PR_SET_FPEMU:
		error = SET_FPEMU_CTL(me, arg2);
		break;
	case PR_GET_FPEMU:
		error = GET_FPEMU_CTL(me, arg2);
		break;
	case PR_SET_FPEXC:
		error = SET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_FPEXC:
		error = GET_FPEXC_CTL(me, arg2);
		break;
	case PR_GET_TIMING:
		error = PR_TIMING_STATISTICAL;
		break;
	case PR_SET_TIMING:
		if (arg2 != PR_TIMING_STATISTICAL)
			error = -EINVAL;
		break;
	case PR_SET_NAME:
		comm[sizeof(me->comm) - 1] = 0;
		if (strncpy_from_user(comm, (char __user *)arg2,
				      sizeof(me->comm) - 1) < 0)
			return -EFAULT;
		set_task_comm(me, comm);
		proc_comm_connector(me);
		break;
	case PR_GET_NAME:
		get_task_comm(comm, me);
		if (copy_to_user((char __user *)arg2, comm, sizeof(comm)))
			return -EFAULT;
		break;
	case PR_GET_ENDIAN:
		error = GET_ENDIAN(me, arg2);
		break;
	case PR_SET_ENDIAN:
		error = SET_ENDIAN(me, arg2);
		break;
	case PR_GET_SECCOMP:
		error = prctl_get_seccomp();
		break;
	case PR_SET_SECCOMP:
		error = prctl_set_seccomp(arg2, (char __user *)arg3);
		break;
	case PR_GET_TSC:
		error = GET_TSC_CTL(arg2);
		break;
	case PR_SET_TSC:
		error = SET_TSC_CTL(arg2);
		break;
	case PR_TASK_PERF_EVENTS_DISABLE:
		error = perf_event_task_disable();
		break;
	case PR_TASK_PERF_EVENTS_ENABLE:
		error = perf_event_task_enable();
		break;
	case PR_GET_TIMERSLACK:
		if (current->timer_slack_ns > ULONG_MAX)
			error = ULONG_MAX;
		else
			error = current->timer_slack_ns;
		break;
	case PR_SET_TIMERSLACK:
		if (arg2 <= 0)
			current->timer_slack_ns =
					current->default_timer_slack_ns;
		else
			current->timer_slack_ns = arg2;
		break;
	case PR_MCE_KILL:
		if (arg4 | arg5)
			return -EINVAL;
		switch (arg2) {
		case PR_MCE_KILL_CLEAR:
			if (arg3 != 0)
				return -EINVAL;
			current->flags &= ~PF_MCE_PROCESS;
			break;
		case PR_MCE_KILL_SET:
			current->flags |= PF_MCE_PROCESS;
			if (arg3 == PR_MCE_KILL_EARLY)
				current->flags |= PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_LATE)
				current->flags &= ~PF_MCE_EARLY;
			else if (arg3 == PR_MCE_KILL_DEFAULT)
				current->flags &=
						~(PF_MCE_EARLY|PF_MCE_PROCESS);
			else
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;
	case PR_MCE_KILL_GET:
		if (arg2 | arg3 | arg4 | arg5)
			return -EINVAL;
		if (current->flags & PF_MCE_PROCESS)
			error = (current->flags & PF_MCE_EARLY) ?
				PR_MCE_KILL_EARLY : PR_MCE_KILL_LATE;
		else
			error = PR_MCE_KILL_DEFAULT;
		break;
	case PR_SET_MM:
		error = prctl_set_mm(arg2, arg3, arg4, arg5);
		break;
	case PR_GET_TID_ADDRESS:
		error = prctl_get_tid_address(me, (int __user **)arg2);
		break;
	case PR_SET_CHILD_SUBREAPER:
		me->signal->is_child_subreaper = !!arg2;
		if (!arg2)
			break;

		walk_process_tree(me, propagate_has_child_subreaper, NULL);
		break;
	case PR_GET_CHILD_SUBREAPER:
		error = put_user(me->signal->is_child_subreaper,
				 (int __user *)arg2);
		break;
	case PR_SET_NO_NEW_PRIVS:
		if (arg2 != 1 || arg3 || arg4 || arg5)
			return -EINVAL;

		task_set_no_new_privs(current);
		break;
	case PR_GET_NO_NEW_PRIVS:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		return task_no_new_privs(current) ? 1 : 0;
	case PR_GET_THP_DISABLE:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = !!(me->mm->def_flags & VM_NOHUGEPAGE);
		break;
	case PR_SET_THP_DISABLE:
		if (arg3 || arg4 || arg5)
			return -EINVAL;
		if (down_write_killable(&me->mm->mmap_sem))
			return -EINTR;
		if (arg2)
			me->mm->def_flags |= VM_NOHUGEPAGE;
		else
			me->mm->def_flags &= ~VM_NOHUGEPAGE;
		up_write(&me->mm->mmap_sem);
		break;
	case PR_MPX_ENABLE_MANAGEMENT:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = MPX_ENABLE_MANAGEMENT();
		break;
	case PR_MPX_DISABLE_MANAGEMENT:
		if (arg2 || arg3 || arg4 || arg5)
			return -EINVAL;
		error = MPX_DISABLE_MANAGEMENT();
		break;
	case PR_SET_FP_MODE:
		error = SET_FP_MODE(me, arg2);
		break;
	case PR_GET_FP_MODE:
		error = GET_FP_MODE(me);
		break;
	default:
		error = -EINVAL;
		break;
	}
	return error;
}
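
/*
 * Illustrative userspace sketch: PR_SET_NAME/PR_GET_NAME above operate
 * on the 16-byte comm field (15 characters plus NUL), truncating longer
 * names silently:
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	int main(void)
 *	{
 *		char name[16];
 *
 *		prctl(PR_SET_NAME, "worker-thread-0");
 *		prctl(PR_GET_NAME, name);
 *		printf("%s\n", name);	// also visible in /proc/self/comm
 *		return 0;
 *	}
 */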

SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep,
		struct getcpu_cache __user *, unused)
{
	int err = 0;
	int cpu = raw_smp_processor_id();

	if (cpup)
		err |= put_user(cpu, cpup);
	if (nodep)
		err |= put_user(cpu_to_node(cpu), nodep);
	return err ? -EFAULT : 0;
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
static int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	get_monotonic_boottime(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}

#ifdef CONFIG_COMPAT
struct compat_sysinfo {
	s32 uptime;
	u32 loads[3];
	u32 totalram;
	u32 freeram;
	u32 sharedram;
	u32 bufferram;
	u32 totalswap;
	u32 freeswap;
	u16 procs;
	u16 pad;
	u32 totalhigh;
	u32 freehigh;
	u32 mem_unit;
	char _f[20-2*sizeof(u32)-sizeof(int)];
};

COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
{
	struct sysinfo s;

	do_sysinfo(&s);

	/* Check to see if any memory value is too large for 32-bit and scale
	 *  down if needed
	 */
	if (upper_32_bits(s.totalram) || upper_32_bits(s.totalswap)) {
		int bitcount = 0;

		while (s.mem_unit < PAGE_SIZE) {
			s.mem_unit <<= 1;
			bitcount++;
		}

		s.totalram >>= bitcount;
		s.freeram >>= bitcount;
		s.sharedram >>= bitcount;
		s.bufferram >>= bitcount;
		s.totalswap >>= bitcount;
		s.freeswap >>= bitcount;
		s.totalhigh >>= bitcount;
		s.freehigh >>= bitcount;
	}

	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
	    __put_user(s.uptime, &info->uptime) ||
	    __put_user(s.loads[0], &info->loads[0]) ||
	    __put_user(s.loads[1], &info->loads[1]) ||
	    __put_user(s.loads[2], &info->loads[2]) ||
	    __put_user(s.totalram, &info->totalram) ||
	    __put_user(s.freeram, &info->freeram) ||
	    __put_user(s.sharedram, &info->sharedram) ||
	    __put_user(s.bufferram, &info->bufferram) ||
	    __put_user(s.totalswap, &info->totalswap) ||
	    __put_user(s.freeswap, &info->freeswap) ||
	    __put_user(s.procs, &info->procs) ||
	    __put_user(s.totalhigh, &info->totalhigh) ||
	    __put_user(s.freehigh, &info->freehigh) ||
	    __put_user(s.mem_unit, &info->mem_unit))
		return -EFAULT;

	return 0;
}
#endif /* CONFIG_COMPAT */
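
/*
 * Illustrative userspace sketch: because of the mem_unit scaling done
 * in do_sysinfo() and the compat path above, portable callers must
 * multiply every memory field by mem_unit instead of assuming bytes:
 *
 *	#include <stdio.h>
 *	#include <sys/sysinfo.h>
 *
 *	int main(void)
 *	{
 *		struct sysinfo si;
 *
 *		if (sysinfo(&si) != 0)
 *			return 1;
 *		printf("total RAM: %llu bytes\n",
 *		       (unsigned long long)si.totalram * si.mem_unit);
 *		return 0;
 *	}
 */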