1 /*
2  *  linux/kernel/fork.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6
7 /*
8  *  'fork.c' contains the help-routines for the 'fork' system call
9  * (see also entry.S and others).
10  * Fork is rather simple, once you get the hang of it, but the memory
11  * management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
12  */
13
14 #include <linux/slab.h>
15 #include <linux/init.h>
16 #include <linux/unistd.h>
17 #include <linux/module.h>
18 #include <linux/vmalloc.h>
19 #include <linux/completion.h>
20 #include <linux/personality.h>
21 #include <linux/mempolicy.h>
22 #include <linux/sem.h>
23 #include <linux/file.h>
24 #include <linux/fdtable.h>
25 #include <linux/iocontext.h>
26 #include <linux/key.h>
27 #include <linux/binfmts.h>
28 #include <linux/mman.h>
29 #include <linux/mmu_notifier.h>
30 #include <linux/fs.h>
31 #include <linux/nsproxy.h>
32 #include <linux/capability.h>
33 #include <linux/cpu.h>
34 #include <linux/cgroup.h>
35 #include <linux/security.h>
36 #include <linux/hugetlb.h>
37 #include <linux/swap.h>
38 #include <linux/syscalls.h>
39 #include <linux/jiffies.h>
40 #include <linux/futex.h>
41 #include <linux/compat.h>
42 #include <linux/kthread.h>
43 #include <linux/task_io_accounting_ops.h>
44 #include <linux/rcupdate.h>
45 #include <linux/ptrace.h>
46 #include <linux/mount.h>
47 #include <linux/audit.h>
48 #include <linux/memcontrol.h>
49 #include <linux/ftrace.h>
50 #include <linux/profile.h>
51 #include <linux/rmap.h>
52 #include <linux/ksm.h>
53 #include <linux/acct.h>
54 #include <linux/tsacct_kern.h>
55 #include <linux/cn_proc.h>
56 #include <linux/freezer.h>
57 #include <linux/delayacct.h>
58 #include <linux/taskstats_kern.h>
59 #include <linux/random.h>
60 #include <linux/tty.h>
61 #include <linux/blkdev.h>
62 #include <linux/fs_struct.h>
63 #include <linux/magic.h>
64 #include <linux/perf_event.h>
65 #include <linux/posix-timers.h>
66 #include <linux/user-return-notifier.h>
67 #include <linux/oom.h>
68 #include <linux/khugepaged.h>
69 #include <linux/signalfd.h>
70
71 #include <asm/pgtable.h>
72 #include <asm/pgalloc.h>
73 #include <asm/uaccess.h>
74 #include <asm/mmu_context.h>
75 #include <asm/cacheflush.h>
76 #include <asm/tlbflush.h>
77
78 #include <trace/events/sched.h>
79
80 #define CREATE_TRACE_POINTS
81 #include <trace/events/task.h>
82
83 /*
84  * Counters protected by write_lock_irq(&tasklist_lock).
85  */
86 unsigned long total_forks;      /* Handle normal Linux uptimes. */
87 int nr_threads;                 /* The idle threads do not count.. */
88
89 int max_threads;                /* tunable limit on nr_threads */
90
91 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
92
93 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
94
95 #ifdef CONFIG_PROVE_RCU
96 int lockdep_tasklist_lock_is_held(void)
97 {
98         return lockdep_is_held(&tasklist_lock);
99 }
100 EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
101 #endif /* #ifdef CONFIG_PROVE_RCU */
102
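/*
 * Approximate count of processes in the system: sum the per-cpu
 * process_counts.  The value can be slightly stale, since the counts
 * may change while we iterate.
 */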
103 int nr_processes(void)
104 {
105         int cpu;
106         int total = 0;
107
108         for_each_possible_cpu(cpu)
109                 total += per_cpu(process_counts, cpu);
110
111         return total;
112 }
113
114 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
115 # define alloc_task_struct_node(node)           \
116                 kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node)
117 # define free_task_struct(tsk)                  \
118                 kmem_cache_free(task_struct_cachep, (tsk))
119 static struct kmem_cache *task_struct_cachep;
120 #endif
121
122 #ifndef __HAVE_ARCH_THREAD_INFO_ALLOCATOR
123 static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
124                                                   int node)
125 {
126 #ifdef CONFIG_DEBUG_STACK_USAGE
127         gfp_t mask = GFP_KERNEL | __GFP_ZERO;
128 #else
129         gfp_t mask = GFP_KERNEL;
130 #endif
131         struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
132
133         return page ? page_address(page) : NULL;
134 }
135
136 static inline void free_thread_info(struct thread_info *ti)
137 {
138         free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
139 }
140 #endif
141
142 /* SLAB cache for signal_struct structures (tsk->signal) */
143 static struct kmem_cache *signal_cachep;
144
145 /* SLAB cache for sighand_struct structures (tsk->sighand) */
146 struct kmem_cache *sighand_cachep;
147
148 /* SLAB cache for files_struct structures (tsk->files) */
149 struct kmem_cache *files_cachep;
150
151 /* SLAB cache for fs_struct structures (tsk->fs) */
152 struct kmem_cache *fs_cachep;
153
154 /* SLAB cache for vm_area_struct structures */
155 struct kmem_cache *vm_area_cachep;
156
157 /* SLAB cache for mm_struct structures (tsk->mm) */
158 static struct kmem_cache *mm_cachep;
159
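/*
 * Charge (+1) or uncharge (-1) one kernel stack against the
 * NR_KERNEL_STACK counter of the zone backing the thread_info pages.
 */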
160 static void account_kernel_stack(struct thread_info *ti, int account)
161 {
162         struct zone *zone = page_zone(virt_to_page(ti));
163
164         mod_zone_page_state(zone, NR_KERNEL_STACK, account);
165 }
166
167 void free_task(struct task_struct *tsk)
168 {
169         account_kernel_stack(tsk->stack, -1);
170         free_thread_info(tsk->stack);
171         rt_mutex_debug_task_free(tsk);
172         ftrace_graph_exit_task(tsk);
173         free_task_struct(tsk);
174 }
175 EXPORT_SYMBOL(free_task);
176
177 static inline void free_signal_struct(struct signal_struct *sig)
178 {
179         taskstats_tgid_free(sig);
180         sched_autogroup_exit(sig);
181         kmem_cache_free(signal_cachep, sig);
182 }
183
184 static inline void put_signal_struct(struct signal_struct *sig)
185 {
186         if (atomic_dec_and_test(&sig->sigcnt))
187                 free_signal_struct(sig);
188 }
189
190 void __put_task_struct(struct task_struct *tsk)
191 {
192         WARN_ON(!tsk->exit_state);
193         WARN_ON(atomic_read(&tsk->usage));
194         WARN_ON(tsk == current);
195
196         exit_creds(tsk);
197         delayacct_tsk_free(tsk);
198         put_signal_struct(tsk->signal);
199
200         if (!profile_handoff_task(tsk))
201                 free_task(tsk);
202 }
203 EXPORT_SYMBOL_GPL(__put_task_struct);
204
205 /*
206  * macro override instead of weak attribute alias, to workaround
207  * gcc 4.1.0 and 4.1.1 bugs with weak attribute and empty functions.
208  */
209 #ifndef arch_task_cache_init
210 #define arch_task_cache_init()
211 #endif
212
213 void __init fork_init(unsigned long mempages)
214 {
215 #ifndef __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
216 #ifndef ARCH_MIN_TASKALIGN
217 #define ARCH_MIN_TASKALIGN      L1_CACHE_BYTES
218 #endif
219         /* create a slab on which task_structs can be allocated */
220         task_struct_cachep =
221                 kmem_cache_create("task_struct", sizeof(struct task_struct),
222                         ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
223 #endif
224
225         /* do the arch specific task caches init */
226         arch_task_cache_init();
227
228         /*
229          * The default maximum number of threads is set to a safe
230          * value: with the divisor below, the kernel stacks backing the
231          * thread structures can take up at most one eighth of memory.
232          */
233         max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);
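        /*
         * Illustrative example (the sizes are assumptions, not requirements):
         * with 4 KiB pages and 8 KiB kernel stacks, THREAD_SIZE/PAGE_SIZE
         * is 2, so 1 GiB of RAM (262144 pages) gives
         * max_threads = 262144 / (8 * 2) = 16384.
         */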
234
235         /*
236          * we need to allow at least 20 threads to boot a system
237          */
238         if (max_threads < 20)
239                 max_threads = 20;
240
241         init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
242         init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
243         init_task.signal->rlim[RLIMIT_SIGPENDING] =
244                 init_task.signal->rlim[RLIMIT_NPROC];
245 }
246
247 int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
248                                                struct task_struct *src)
249 {
250         *dst = *src;
251         return 0;
252 }
253
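/*
 * Allocate a task_struct and a kernel stack (thread_info) on the NUMA
 * node suggested by tsk_fork_get_node(), copy @orig into them, and set
 * up the stack-end magic and the initial reference count.  This runs
 * before any of the copy_*() helpers in copy_process().
 */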
254 static struct task_struct *dup_task_struct(struct task_struct *orig)
255 {
256         struct task_struct *tsk;
257         struct thread_info *ti;
258         unsigned long *stackend;
259         int node = tsk_fork_get_node(orig);
260         int err;
261
262         prepare_to_copy(orig);
263
264         tsk = alloc_task_struct_node(node);
265         if (!tsk)
266                 return NULL;
267
268         ti = alloc_thread_info_node(tsk, node);
269         if (!ti) {
270                 free_task_struct(tsk);
271                 return NULL;
272         }
273
274         err = arch_dup_task_struct(tsk, orig);
275         if (err)
276                 goto out;
277
278         tsk->stack = ti;
279
280         setup_thread_stack(tsk, orig);
281         clear_user_return_notifier(tsk);
282         clear_tsk_need_resched(tsk);
283         stackend = end_of_stack(tsk);
284         *stackend = STACK_END_MAGIC;    /* for overflow detection */
285
286 #ifdef CONFIG_CC_STACKPROTECTOR
287         tsk->stack_canary = get_random_int();
288 #endif
289
290         /*
291          * One for us, one for whoever does the "release_task()" (usually
292          * parent)
293          */
294         atomic_set(&tsk->usage, 2);
295 #ifdef CONFIG_BLK_DEV_IO_TRACE
296         tsk->btrace_seq = 0;
297 #endif
298         tsk->splice_pipe = NULL;
299
300         account_kernel_stack(ti, 1);
301
302         return tsk;
303
304 out:
305         free_thread_info(ti);
306         free_task_struct(tsk);
307         return NULL;
308 }
309
310 #ifdef CONFIG_MMU
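/*
 * Duplicate the parent's address space for fork(): walk every VMA in
 * @oldmm, clone it into @mm (skipping VM_DONTCOPY areas), link it into
 * the VMA list and rbtree, and copy the page table entries with
 * copy_page_range().
 */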
311 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
312 {
313         struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
314         struct rb_node **rb_link, *rb_parent;
315         int retval;
316         unsigned long charge;
317         struct mempolicy *pol;
318
319         down_write(&oldmm->mmap_sem);
320         flush_cache_dup_mm(oldmm);
321         /*
322          * Not linked in yet - no deadlock potential:
323          */
324         down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
325
326         mm->locked_vm = 0;
327         mm->mmap = NULL;
328         mm->mmap_cache = NULL;
329         mm->free_area_cache = oldmm->mmap_base;
330         mm->cached_hole_size = ~0UL;
331         mm->map_count = 0;
332         cpumask_clear(mm_cpumask(mm));
333         mm->mm_rb = RB_ROOT;
334         rb_link = &mm->mm_rb.rb_node;
335         rb_parent = NULL;
336         pprev = &mm->mmap;
337         retval = ksm_fork(mm, oldmm);
338         if (retval)
339                 goto out;
340         retval = khugepaged_fork(mm, oldmm);
341         if (retval)
342                 goto out;
343
344         prev = NULL;
345         for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
346                 struct file *file;
347
348                 if (mpnt->vm_flags & VM_DONTCOPY) {
349                         long pages = vma_pages(mpnt);
350                         mm->total_vm -= pages;
351                         vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
352                                                                 -pages);
353                         continue;
354                 }
355                 charge = 0;
356                 if (mpnt->vm_flags & VM_ACCOUNT) {
357                         unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
358                         if (security_vm_enough_memory(len))
359                                 goto fail_nomem;
360                         charge = len;
361                 }
362                 tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
363                 if (!tmp)
364                         goto fail_nomem;
365                 *tmp = *mpnt;
366                 INIT_LIST_HEAD(&tmp->anon_vma_chain);
367                 pol = mpol_dup(vma_policy(mpnt));
368                 retval = PTR_ERR(pol);
369                 if (IS_ERR(pol))
370                         goto fail_nomem_policy;
371                 vma_set_policy(tmp, pol);
372                 tmp->vm_mm = mm;
373                 if (anon_vma_fork(tmp, mpnt))
374                         goto fail_nomem_anon_vma_fork;
375                 tmp->vm_flags &= ~VM_LOCKED;
376                 tmp->vm_next = tmp->vm_prev = NULL;
377                 file = tmp->vm_file;
378                 if (file) {
379                         struct inode *inode = file->f_path.dentry->d_inode;
380                         struct address_space *mapping = file->f_mapping;
381
382                         get_file(file);
383                         if (tmp->vm_flags & VM_DENYWRITE)
384                                 atomic_dec(&inode->i_writecount);
385                         mutex_lock(&mapping->i_mmap_mutex);
386                         if (tmp->vm_flags & VM_SHARED)
387                                 mapping->i_mmap_writable++;
388                         flush_dcache_mmap_lock(mapping);
389                         /* insert tmp into the share list, just after mpnt */
390                         vma_prio_tree_add(tmp, mpnt);
391                         flush_dcache_mmap_unlock(mapping);
392                         mutex_unlock(&mapping->i_mmap_mutex);
393                 }
394
395                 /*
396                  * Clear hugetlb-related page reserves for children. This only
397                  * affects MAP_PRIVATE mappings. Faults generated by the child
398                  * are not guaranteed to succeed, even if read-only
399                  */
400                 if (is_vm_hugetlb_page(tmp))
401                         reset_vma_resv_huge_pages(tmp);
402
403                 /*
404                  * Link in the new vma and copy the page table entries.
405                  */
406                 *pprev = tmp;
407                 pprev = &tmp->vm_next;
408                 tmp->vm_prev = prev;
409                 prev = tmp;
410
411                 __vma_link_rb(mm, tmp, rb_link, rb_parent);
412                 rb_link = &tmp->vm_rb.rb_right;
413                 rb_parent = &tmp->vm_rb;
414
415                 mm->map_count++;
416                 retval = copy_page_range(mm, oldmm, mpnt);
417
418                 if (tmp->vm_ops && tmp->vm_ops->open)
419                         tmp->vm_ops->open(tmp);
420
421                 if (retval)
422                         goto out;
423         }
424         /* a new mm has just been created */
425         arch_dup_mmap(oldmm, mm);
426         retval = 0;
427 out:
428         up_write(&mm->mmap_sem);
429         flush_tlb_mm(oldmm);
430         up_write(&oldmm->mmap_sem);
431         return retval;
432 fail_nomem_anon_vma_fork:
433         mpol_put(pol);
434 fail_nomem_policy:
435         kmem_cache_free(vm_area_cachep, tmp);
436 fail_nomem:
437         retval = -ENOMEM;
438         vm_unacct_memory(charge);
439         goto out;
440 }
441
442 static inline int mm_alloc_pgd(struct mm_struct *mm)
443 {
444         mm->pgd = pgd_alloc(mm);
445         if (unlikely(!mm->pgd))
446                 return -ENOMEM;
447         return 0;
448 }
449
450 static inline void mm_free_pgd(struct mm_struct *mm)
451 {
452         pgd_free(mm, mm->pgd);
453 }
454 #else
455 #define dup_mmap(mm, oldmm)     (0)
456 #define mm_alloc_pgd(mm)        (0)
457 #define mm_free_pgd(mm)
458 #endif /* CONFIG_MMU */
459
460 __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
461
462 #define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
463 #define free_mm(mm)     (kmem_cache_free(mm_cachep, (mm)))
464
465 static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;
466
467 static int __init coredump_filter_setup(char *s)
468 {
469         default_dump_filter =
470                 (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
471                 MMF_DUMP_FILTER_MASK;
472         return 1;
473 }
474
475 __setup("coredump_filter=", coredump_filter_setup);
476
477 #include <linux/init_task.h>
478
479 static void mm_init_aio(struct mm_struct *mm)
480 {
481 #ifdef CONFIG_AIO
482         spin_lock_init(&mm->ioctx_lock);
483         INIT_HLIST_HEAD(&mm->ioctx_list);
484 #endif
485 }
486
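/*
 * Initialize a freshly allocated (or memcpy'd, see dup_mm()) mm_struct:
 * reference counts, locks, list heads, RSS counters, AIO state and the
 * page directory.  Returns NULL and frees the mm if pgd allocation fails.
 */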
487 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
488 {
489         atomic_set(&mm->mm_users, 1);
490         atomic_set(&mm->mm_count, 1);
491         init_rwsem(&mm->mmap_sem);
492         INIT_LIST_HEAD(&mm->mmlist);
493         mm->flags = (current->mm) ?
494                 (current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
495         mm->core_state = NULL;
496         mm->nr_ptes = 0;
497         memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
498         spin_lock_init(&mm->page_table_lock);
499         mm->free_area_cache = TASK_UNMAPPED_BASE;
500         mm->cached_hole_size = ~0UL;
501         mm_init_aio(mm);
502         mm_init_owner(mm, p);
503
504         if (likely(!mm_alloc_pgd(mm))) {
505                 mm->def_flags = 0;
506                 mmu_notifier_mm_init(mm);
507                 return mm;
508         }
509
510         free_mm(mm);
511         return NULL;
512 }
513
514 /*
515  * Allocate and initialize an mm_struct.
516  */
517 struct mm_struct *mm_alloc(void)
518 {
519         struct mm_struct *mm;
520
521         mm = allocate_mm();
522         if (!mm)
523                 return NULL;
524
525         memset(mm, 0, sizeof(*mm));
526         mm_init_cpumask(mm);
527         return mm_init(mm, current);
528 }
529
530 /*
531  * Called when the last reference to the mm
532  * is dropped: either by a lazy thread or by
533  * mmput. Free the page directory and the mm.
534  */
535 void __mmdrop(struct mm_struct *mm)
536 {
537         BUG_ON(mm == &init_mm);
538         mm_free_pgd(mm);
539         destroy_context(mm);
540         mmu_notifier_mm_destroy(mm);
541 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
542         VM_BUG_ON(mm->pmd_huge_pte);
543 #endif
544         free_mm(mm);
545 }
546 EXPORT_SYMBOL_GPL(__mmdrop);
547
548 /*
549  * Decrement the use count and release all resources for an mm.
550  */
551 void mmput(struct mm_struct *mm)
552 {
553         might_sleep();
554
555         if (atomic_dec_and_test(&mm->mm_users)) {
556                 exit_aio(mm);
557                 ksm_exit(mm);
558                 khugepaged_exit(mm); /* must run before exit_mmap */
559                 exit_mmap(mm);
560                 set_mm_exe_file(mm, NULL);
561                 if (!list_empty(&mm->mmlist)) {
562                         spin_lock(&mmlist_lock);
563                         list_del(&mm->mmlist);
564                         spin_unlock(&mmlist_lock);
565                 }
566                 put_swap_token(mm);
567                 if (mm->binfmt)
568                         module_put(mm->binfmt->module);
569                 mmdrop(mm);
570         }
571 }
572 EXPORT_SYMBOL_GPL(mmput);
573
574 /*
575  * We added or removed a vma mapping the executable. The vmas are only mapped
576  * during exec and are not mapped with the mmap system call.
577  * Callers must hold down_write() on the mm's mmap_sem for these operations.
578  */
579 void added_exe_file_vma(struct mm_struct *mm)
580 {
581         mm->num_exe_file_vmas++;
582 }
583
584 void removed_exe_file_vma(struct mm_struct *mm)
585 {
586         mm->num_exe_file_vmas--;
587         if ((mm->num_exe_file_vmas == 0) && mm->exe_file) {
588                 fput(mm->exe_file);
589                 mm->exe_file = NULL;
590         }
591
592 }
593
594 void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
595 {
596         if (new_exe_file)
597                 get_file(new_exe_file);
598         if (mm->exe_file)
599                 fput(mm->exe_file);
600         mm->exe_file = new_exe_file;
601         mm->num_exe_file_vmas = 0;
602 }
603
604 struct file *get_mm_exe_file(struct mm_struct *mm)
605 {
606         struct file *exe_file;
607
608         /* We need mmap_sem to protect against races with removal of
609          * VM_EXECUTABLE vmas */
610         down_read(&mm->mmap_sem);
611         exe_file = mm->exe_file;
612         if (exe_file)
613                 get_file(exe_file);
614         up_read(&mm->mmap_sem);
615         return exe_file;
616 }
617
618 static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
619 {
620         /* It's safe to write the exe_file pointer without exe_file_lock because
621          * this is called during fork when the task is not yet in /proc */
622         newmm->exe_file = get_mm_exe_file(oldmm);
623 }
624
625 /**
626  * get_task_mm - acquire a reference to the task's mm
627  *
628  * Returns %NULL if the task has no mm, or if PF_KTHREAD is set (meaning
629  * the kernel thread has only transiently adopted a user mm with use_mm,
630  * e.g. to do its AIO).  Otherwise returns the mm after bumping up its
631  * use count.  The caller must release the mm via mmput() after use.
632  * Typically used by /proc and ptrace.
633  */
634 struct mm_struct *get_task_mm(struct task_struct *task)
635 {
636         struct mm_struct *mm;
637
638         task_lock(task);
639         mm = task->mm;
640         if (mm) {
641                 if (task->flags & PF_KTHREAD)
642                         mm = NULL;
643                 else
644                         atomic_inc(&mm->mm_users);
645         }
646         task_unlock(task);
647         return mm;
648 }
649 EXPORT_SYMBOL_GPL(get_task_mm);
650
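/*
 * Grab another task's mm, like get_task_mm(), but only after checking
 * with ptrace_may_access() that the caller is allowed @mode access;
 * returns ERR_PTR(-EACCES) otherwise.  cred_guard_mutex is held across
 * the check so the task's credentials stay stable.
 */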
651 struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
652 {
653         struct mm_struct *mm;
654         int err;
655
656         err =  mutex_lock_killable(&task->signal->cred_guard_mutex);
657         if (err)
658                 return ERR_PTR(err);
659
660         mm = get_task_mm(task);
661         if (mm && mm != current->mm &&
662                         !ptrace_may_access(task, mode)) {
663                 mmput(mm);
664                 mm = ERR_PTR(-EACCES);
665         }
666         mutex_unlock(&task->signal->cred_guard_mutex);
667
668         return mm;
669 }
670
671 /* Please note the differences between mmput and mm_release.
672  * mmput is called whenever we stop holding onto a mm_struct,
673  * on error and success paths alike.
674  *
675  * mm_release is called after a mm_struct has been removed
676  * from the current process.
677  *
678  * This difference is important for error handling, when we
679  * have only half set up a mm_struct for a new process and need to
680  * restore the old one: we mmput the new mm_struct before
681  * restoring the old one...
682  * Eric Biederman 10 January 1998
683  */
684 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
685 {
686         struct completion *vfork_done = tsk->vfork_done;
687
688         /* Get rid of any futexes when releasing the mm */
689 #ifdef CONFIG_FUTEX
690         if (unlikely(tsk->robust_list)) {
691                 exit_robust_list(tsk);
692                 tsk->robust_list = NULL;
693         }
694 #ifdef CONFIG_COMPAT
695         if (unlikely(tsk->compat_robust_list)) {
696                 compat_exit_robust_list(tsk);
697                 tsk->compat_robust_list = NULL;
698         }
699 #endif
700         if (unlikely(!list_empty(&tsk->pi_state_list)))
701                 exit_pi_state_list(tsk);
702 #endif
703
704         /* Get rid of any cached register state */
705         deactivate_mm(tsk, mm);
706
707         /* notify parent sleeping on vfork() */
708         if (vfork_done) {
709                 tsk->vfork_done = NULL;
710                 complete(vfork_done);
711         }
712
713         /*
714          * If we're exiting normally, clear a user-space tid field if
715          * requested.  We leave this alone when dying by signal, to leave
716          * the value intact in a core dump, and to save the unnecessary
717          * trouble otherwise.  Userland only wants this done for a sys_exit.
718          */
719         if (tsk->clear_child_tid) {
720                 if (!(tsk->flags & PF_SIGNALED) &&
721                     atomic_read(&mm->mm_users) > 1) {
722                         /*
723                          * We don't check the error code - if userspace has
724                          * not set up a proper pointer then tough luck.
725                          */
726                         put_user(0, tsk->clear_child_tid);
727                         sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
728                                         1, NULL, NULL, 0);
729                 }
730                 tsk->clear_child_tid = NULL;
731         }
732 }
733
734 /*
735  * Allocate a new mm structure and copy contents from the
736  * mm structure of the passed in task structure.
737  */
738 struct mm_struct *dup_mm(struct task_struct *tsk)
739 {
740         struct mm_struct *mm, *oldmm = current->mm;
741         int err;
742
743         if (!oldmm)
744                 return NULL;
745
746         mm = allocate_mm();
747         if (!mm)
748                 goto fail_nomem;
749
750         memcpy(mm, oldmm, sizeof(*mm));
751         mm_init_cpumask(mm);
752
753         /* Initializing for Swap token stuff */
754         mm->token_priority = 0;
755         mm->last_interval = 0;
756
757 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
758         mm->pmd_huge_pte = NULL;
759 #endif
760
761         if (!mm_init(mm, tsk))
762                 goto fail_nomem;
763
764         if (init_new_context(tsk, mm))
765                 goto fail_nocontext;
766
767         dup_mm_exe_file(oldmm, mm);
768
769         err = dup_mmap(mm, oldmm);
770         if (err)
771                 goto free_pt;
772
773         mm->hiwater_rss = get_mm_rss(mm);
774         mm->hiwater_vm = mm->total_vm;
775
776         if (mm->binfmt && !try_module_get(mm->binfmt->module))
777                 goto free_pt;
778
779         return mm;
780
781 free_pt:
782         /* don't put binfmt in mmput, we haven't got module yet */
783         mm->binfmt = NULL;
784         mmput(mm);
785
786 fail_nomem:
787         return NULL;
788
789 fail_nocontext:
790         /*
791          * If init_new_context() failed, we cannot use mmput() to free the mm
792          * because it calls destroy_context()
793          */
794         mm_free_pgd(mm);
795         free_mm(mm);
796         return NULL;
797 }
798
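/*
 * Give the child its mm: kernel threads (no current->mm) get none,
 * CLONE_VM shares the parent's mm_struct, and a plain fork duplicates
 * it with dup_mm().
 */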
799 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
800 {
801         struct mm_struct *mm, *oldmm;
802         int retval;
803
804         tsk->min_flt = tsk->maj_flt = 0;
805         tsk->nvcsw = tsk->nivcsw = 0;
806 #ifdef CONFIG_DETECT_HUNG_TASK
807         tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
808 #endif
809
810         tsk->mm = NULL;
811         tsk->active_mm = NULL;
812
813         /*
814          * Are we cloning a kernel thread?
815          *
816          * We need to steal an active VM for that.
817          */
818         oldmm = current->mm;
819         if (!oldmm)
820                 return 0;
821
822         if (clone_flags & CLONE_VM) {
823                 atomic_inc(&oldmm->mm_users);
824                 mm = oldmm;
825                 goto good_mm;
826         }
827
828         retval = -ENOMEM;
829         mm = dup_mm(tsk);
830         if (!mm)
831                 goto fail_nomem;
832
833 good_mm:
834         /* Initializing for Swap token stuff */
835         mm->token_priority = 0;
836         mm->last_interval = 0;
837
838         tsk->mm = mm;
839         tsk->active_mm = mm;
840         return 0;
841
842 fail_nomem:
843         return retval;
844 }
845
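/*
 * Share or copy the fs_struct (root, cwd, umask): CLONE_FS just bumps
 * fs->users, unless the parent is in the middle of exec (-EAGAIN);
 * otherwise the child gets a private copy via copy_fs_struct().
 */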
846 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
847 {
848         struct fs_struct *fs = current->fs;
849         if (clone_flags & CLONE_FS) {
850                 /* tsk->fs is already what we want */
851                 spin_lock(&fs->lock);
852                 if (fs->in_exec) {
853                         spin_unlock(&fs->lock);
854                         return -EAGAIN;
855                 }
856                 fs->users++;
857                 spin_unlock(&fs->lock);
858                 return 0;
859         }
860         tsk->fs = copy_fs_struct(fs);
861         if (!tsk->fs)
862                 return -ENOMEM;
863         return 0;
864 }
865
866 static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
867 {
868         struct files_struct *oldf, *newf;
869         int error = 0;
870
871         /*
872          * A background process may not have any files ...
873          */
874         oldf = current->files;
875         if (!oldf)
876                 goto out;
877
878         if (clone_flags & CLONE_FILES) {
879                 atomic_inc(&oldf->count);
880                 goto out;
881         }
882
883         newf = dup_fd(oldf, &error);
884         if (!newf)
885                 goto out;
886
887         tsk->files = newf;
888         error = 0;
889 out:
890         return error;
891 }
892
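/*
 * CLONE_IO shares the parent's io_context; otherwise only the I/O
 * priority is inherited, by allocating a fresh context for the child.
 */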
893 static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
894 {
895 #ifdef CONFIG_BLOCK
896         struct io_context *ioc = current->io_context;
897         struct io_context *new_ioc;
898
899         if (!ioc)
900                 return 0;
901         /*
902          * Share io context with parent, if CLONE_IO is set
903          */
904         if (clone_flags & CLONE_IO) {
905                 tsk->io_context = ioc_task_link(ioc);
906                 if (unlikely(!tsk->io_context))
907                         return -ENOMEM;
908         } else if (ioprio_valid(ioc->ioprio)) {
909                 new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
910                 if (unlikely(!new_ioc))
911                         return -ENOMEM;
912
913                 new_ioc->ioprio = ioc->ioprio;
914                 put_io_context(new_ioc);
915         }
916 #endif
917         return 0;
918 }
919
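/*
 * CLONE_SIGHAND shares the parent's signal handler table; otherwise
 * allocate a new sighand_struct and copy the handlers.
 */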
920 static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
921 {
922         struct sighand_struct *sig;
923
924         if (clone_flags & CLONE_SIGHAND) {
925                 atomic_inc(&current->sighand->count);
926                 return 0;
927         }
928         sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
929         rcu_assign_pointer(tsk->sighand, sig);
930         if (!sig)
931                 return -ENOMEM;
932         atomic_set(&sig->count, 1);
933         memcpy(sig->action, current->sighand->action, sizeof(sig->action));
934         return 0;
935 }
936
937 void __cleanup_sighand(struct sighand_struct *sighand)
938 {
939         if (atomic_dec_and_test(&sighand->count)) {
940                 signalfd_cleanup(sighand);
941                 kmem_cache_free(sighand_cachep, sighand);
942         }
943 }
944
945
946 /*
947  * Initialize POSIX timer handling for a thread group.
948  */
949 static void posix_cpu_timers_init_group(struct signal_struct *sig)
950 {
951         unsigned long cpu_limit;
952
953         /* Thread group counters. */
954         thread_group_cputime_init(sig);
955
956         cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
957         if (cpu_limit != RLIM_INFINITY) {
958                 sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
959                 sig->cputimer.running = 1;
960         }
961
962         /* The timer lists. */
963         INIT_LIST_HEAD(&sig->cpu_timers[0]);
964         INIT_LIST_HEAD(&sig->cpu_timers[1]);
965         INIT_LIST_HEAD(&sig->cpu_timers[2]);
966 }
967
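/*
 * Allocate and initialize the per-thread-group signal_struct (rlimits,
 * shared pending signals, ITIMER_REAL timer, group CPU timers).  Threads
 * created with CLONE_THREAD keep using the group leader's.
 */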
968 static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
969 {
970         struct signal_struct *sig;
971
972         if (clone_flags & CLONE_THREAD)
973                 return 0;
974
975         sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
976         tsk->signal = sig;
977         if (!sig)
978                 return -ENOMEM;
979
980         sig->nr_threads = 1;
981         atomic_set(&sig->live, 1);
982         atomic_set(&sig->sigcnt, 1);
983         init_waitqueue_head(&sig->wait_chldexit);
984         if (clone_flags & CLONE_NEWPID)
985                 sig->flags |= SIGNAL_UNKILLABLE;
986         sig->curr_target = tsk;
987         init_sigpending(&sig->shared_pending);
988         INIT_LIST_HEAD(&sig->posix_timers);
989
990         hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
991         sig->real_timer.function = it_real_fn;
992
993         task_lock(current->group_leader);
994         memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
995         task_unlock(current->group_leader);
996
997         posix_cpu_timers_init_group(sig);
998
999         tty_audit_fork(sig);
1000         sched_autogroup_fork(sig);
1001
1002 #ifdef CONFIG_CGROUPS
1003         init_rwsem(&sig->group_rwsem);
1004 #endif
1005
1006         sig->oom_adj = current->signal->oom_adj;
1007         sig->oom_score_adj = current->signal->oom_score_adj;
1008         sig->oom_score_adj_min = current->signal->oom_score_adj_min;
1009
1010         mutex_init(&sig->cred_guard_mutex);
1011
1012         return 0;
1013 }
1014
1015 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
1016 {
1017         unsigned long new_flags = p->flags;
1018
1019         new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
1020         new_flags |= PF_FORKNOEXEC;
1021         new_flags |= PF_STARTING;
1022         p->flags = new_flags;
1023 }
1024
1025 SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
1026 {
1027         current->clear_child_tid = tidptr;
1028
1029         return task_pid_vnr(current);
1030 }
1031
1032 static void rt_mutex_init_task(struct task_struct *p)
1033 {
1034         raw_spin_lock_init(&p->pi_lock);
1035 #ifdef CONFIG_RT_MUTEXES
1036         plist_head_init(&p->pi_waiters);
1037         p->pi_blocked_on = NULL;
1038 #endif
1039 }
1040
1041 #ifdef CONFIG_MM_OWNER
1042 void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
1043 {
1044         mm->owner = p;
1045 }
1046 #endif /* CONFIG_MM_OWNER */
1047
1048 /*
1049  * Initialize POSIX timer handling for a single task.
1050  */
1051 static void posix_cpu_timers_init(struct task_struct *tsk)
1052 {
1053         tsk->cputime_expires.prof_exp = 0;
1054         tsk->cputime_expires.virt_exp = 0;
1055         tsk->cputime_expires.sched_exp = 0;
1056         INIT_LIST_HEAD(&tsk->cpu_timers[0]);
1057         INIT_LIST_HEAD(&tsk->cpu_timers[1]);
1058         INIT_LIST_HEAD(&tsk->cpu_timers[2]);
1059 }
1060
1061 /*
1062  * This creates a new process as a copy of the old one,
1063  * but does not actually start it yet.
1064  *
1065  * It copies the registers, and all the appropriate
1066  * parts of the process environment (as per the clone
1067  * flags). The actual kick-off is left to the caller.
1068  */
1069 static struct task_struct *copy_process(unsigned long clone_flags,
1070                                         unsigned long stack_start,
1071                                         struct pt_regs *regs,
1072                                         unsigned long stack_size,
1073                                         int __user *child_tidptr,
1074                                         struct pid *pid,
1075                                         int trace)
1076 {
1077         int retval;
1078         struct task_struct *p;
1079         int cgroup_callbacks_done = 0;
1080
1081         if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
1082                 return ERR_PTR(-EINVAL);
1083
1084         /*
1085          * Thread groups must share signals as well, and detached threads
1086          * can only be started up within the thread group.
1087          */
1088         if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
1089                 return ERR_PTR(-EINVAL);
1090
1091         /*
1092          * Shared signal handlers imply shared VM. By way of the above,
1093          * thread groups also imply shared VM. Blocking this case allows
1094          * for various simplifications in other code.
1095          */
1096         if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
1097                 return ERR_PTR(-EINVAL);
1098
1099         /*
1100          * Siblings of global init remain as zombies on exit since they are
1101          * not reaped by their parent (swapper). To solve this and to avoid
1102          * multi-rooted process trees, prevent global and container-inits
1103          * from creating siblings.
1104          */
1105         if ((clone_flags & CLONE_PARENT) &&
1106                                 current->signal->flags & SIGNAL_UNKILLABLE)
1107                 return ERR_PTR(-EINVAL);
1108
1109         retval = security_task_create(clone_flags);
1110         if (retval)
1111                 goto fork_out;
1112
1113         retval = -ENOMEM;
1114         p = dup_task_struct(current);
1115         if (!p)
1116                 goto fork_out;
1117
1118         ftrace_graph_init_task(p);
1119
1120         rt_mutex_init_task(p);
1121
1122 #ifdef CONFIG_PROVE_LOCKING
1123         DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
1124         DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
1125 #endif
1126         retval = -EAGAIN;
1127         if (atomic_read(&p->real_cred->user->processes) >=
1128                         task_rlimit(p, RLIMIT_NPROC)) {
1129                 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
1130                     p->real_cred->user != INIT_USER)
1131                         goto bad_fork_free;
1132         }
1133         current->flags &= ~PF_NPROC_EXCEEDED;
1134
1135         retval = copy_creds(p, clone_flags);
1136         if (retval < 0)
1137                 goto bad_fork_free;
1138
1139         /*
1140          * If multiple threads are within copy_process(), then this check
1141          * triggers too late. This doesn't hurt, the check is only there
1142          * to stop root fork bombs.
1143          */
1144         retval = -EAGAIN;
1145         if (nr_threads >= max_threads)
1146                 goto bad_fork_cleanup_count;
1147
1148         if (!try_module_get(task_thread_info(p)->exec_domain->module))
1149                 goto bad_fork_cleanup_count;
1150
1151         p->did_exec = 0;
1152         delayacct_tsk_init(p);  /* Must remain after dup_task_struct() */
1153         copy_flags(clone_flags, p);
1154         INIT_LIST_HEAD(&p->children);
1155         INIT_LIST_HEAD(&p->sibling);
1156         rcu_copy_process(p);
1157         p->vfork_done = NULL;
1158         spin_lock_init(&p->alloc_lock);
1159
1160         init_sigpending(&p->pending);
1161
1162         p->utime = p->stime = p->gtime = 0;
1163         p->utimescaled = p->stimescaled = 0;
1164 #ifndef CONFIG_VIRT_CPU_ACCOUNTING
1165         p->prev_utime = p->prev_stime = 0;
1166 #endif
1167 #if defined(SPLIT_RSS_COUNTING)
1168         memset(&p->rss_stat, 0, sizeof(p->rss_stat));
1169 #endif
1170
1171         p->default_timer_slack_ns = current->timer_slack_ns;
1172
1173         task_io_accounting_init(&p->ioac);
1174         acct_clear_integrals(p);
1175
1176         posix_cpu_timers_init(p);
1177
1178         do_posix_clock_monotonic_gettime(&p->start_time);
1179         p->real_start_time = p->start_time;
1180         monotonic_to_bootbased(&p->real_start_time);
1181         p->io_context = NULL;
1182         p->audit_context = NULL;
1183         if (clone_flags & CLONE_THREAD)
1184                 threadgroup_change_begin(current);
1185         cgroup_fork(p);
1186 #ifdef CONFIG_NUMA
1187         p->mempolicy = mpol_dup(p->mempolicy);
1188         if (IS_ERR(p->mempolicy)) {
1189                 retval = PTR_ERR(p->mempolicy);
1190                 p->mempolicy = NULL;
1191                 goto bad_fork_cleanup_cgroup;
1192         }
1193         mpol_fix_fork_child_flag(p);
1194 #endif
1195 #ifdef CONFIG_CPUSETS
1196         p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
1197         p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
1198 #endif
1199 #ifdef CONFIG_TRACE_IRQFLAGS
1200         p->irq_events = 0;
1201 #ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
1202         p->hardirqs_enabled = 1;
1203 #else
1204         p->hardirqs_enabled = 0;
1205 #endif
1206         p->hardirq_enable_ip = 0;
1207         p->hardirq_enable_event = 0;
1208         p->hardirq_disable_ip = _THIS_IP_;
1209         p->hardirq_disable_event = 0;
1210         p->softirqs_enabled = 1;
1211         p->softirq_enable_ip = _THIS_IP_;
1212         p->softirq_enable_event = 0;
1213         p->softirq_disable_ip = 0;
1214         p->softirq_disable_event = 0;
1215         p->hardirq_context = 0;
1216         p->softirq_context = 0;
1217 #endif
1218 #ifdef CONFIG_LOCKDEP
1219         p->lockdep_depth = 0; /* no locks held yet */
1220         p->curr_chain_key = 0;
1221         p->lockdep_recursion = 0;
1222 #endif
1223
1224 #ifdef CONFIG_DEBUG_MUTEXES
1225         p->blocked_on = NULL; /* not blocked yet */
1226 #endif
1227 #ifdef CONFIG_CGROUP_MEM_RES_CTLR
1228         p->memcg_batch.do_batch = 0;
1229         p->memcg_batch.memcg = NULL;
1230 #endif
1231
1232         /* Perform scheduler related setup. Assign this task to a CPU. */
1233         sched_fork(p);
1234
1235         retval = perf_event_init_task(p);
1236         if (retval)
1237                 goto bad_fork_cleanup_policy;
1238         retval = audit_alloc(p);
1239         if (retval)
1240                 goto bad_fork_cleanup_policy;
1241         /* copy all the process information */
1242         retval = copy_semundo(clone_flags, p);
1243         if (retval)
1244                 goto bad_fork_cleanup_audit;
1245         retval = copy_files(clone_flags, p);
1246         if (retval)
1247                 goto bad_fork_cleanup_semundo;
1248         retval = copy_fs(clone_flags, p);
1249         if (retval)
1250                 goto bad_fork_cleanup_files;
1251         retval = copy_sighand(clone_flags, p);
1252         if (retval)
1253                 goto bad_fork_cleanup_fs;
1254         retval = copy_signal(clone_flags, p);
1255         if (retval)
1256                 goto bad_fork_cleanup_sighand;
1257         retval = copy_mm(clone_flags, p);
1258         if (retval)
1259                 goto bad_fork_cleanup_signal;
1260         retval = copy_namespaces(clone_flags, p);
1261         if (retval)
1262                 goto bad_fork_cleanup_mm;
1263         retval = copy_io(clone_flags, p);
1264         if (retval)
1265                 goto bad_fork_cleanup_namespaces;
1266         retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
1267         if (retval)
1268                 goto bad_fork_cleanup_io;
1269
1270         if (pid != &init_struct_pid) {
1271                 retval = -ENOMEM;
1272                 pid = alloc_pid(p->nsproxy->pid_ns);
1273                 if (!pid)
1274                         goto bad_fork_cleanup_io;
1275         }
1276
1277         p->pid = pid_nr(pid);
1278         p->tgid = p->pid;
1279         if (clone_flags & CLONE_THREAD)
1280                 p->tgid = current->tgid;
1281
1282         p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
1283         /*
1284          * Clear TID on mm_release()?
1285          */
1286         p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
1287 #ifdef CONFIG_BLOCK
1288         p->plug = NULL;
1289 #endif
1290 #ifdef CONFIG_FUTEX
1291         p->robust_list = NULL;
1292 #ifdef CONFIG_COMPAT
1293         p->compat_robust_list = NULL;
1294 #endif
1295         INIT_LIST_HEAD(&p->pi_state_list);
1296         p->pi_state_cache = NULL;
1297 #endif
1298         /*
1299          * sigaltstack should be cleared when sharing the same VM
1300          */
1301         if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
1302                 p->sas_ss_sp = p->sas_ss_size = 0;
1303
1304         /*
1305          * Syscall tracing and stepping should be turned off in the
1306          * child regardless of CLONE_PTRACE.
1307          */
1308         user_disable_single_step(p);
1309         clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
1310 #ifdef TIF_SYSCALL_EMU
1311         clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
1312 #endif
1313         clear_all_latency_tracing(p);
1314
1315         /* ok, now we should be set up.. */
1316         p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
1317         p->pdeath_signal = 0;
1318         p->exit_state = 0;
1319
1320         p->nr_dirtied = 0;
1321         p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
1322         p->dirty_paused_when = 0;
1323
1324         /*
1325          * Ok, make it visible to the rest of the system.
1326          * We don't wake it up yet.
1327          */
1328         p->group_leader = p;
1329         INIT_LIST_HEAD(&p->thread_group);
1330
1331         /* Now that the task is set up, run cgroup callbacks if
1332          * necessary. We need to run them before the task is visible
1333          * on the tasklist. */
1334         cgroup_fork_callbacks(p);
1335         cgroup_callbacks_done = 1;
1336
1337         /* Need tasklist lock for parent etc handling! */
1338         write_lock_irq(&tasklist_lock);
1339
1340         /* CLONE_PARENT re-uses the old parent */
1341         if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
1342                 p->real_parent = current->real_parent;
1343                 p->parent_exec_id = current->parent_exec_id;
1344         } else {
1345                 p->real_parent = current;
1346                 p->parent_exec_id = current->self_exec_id;
1347         }
1348
1349         spin_lock(&current->sighand->siglock);
1350
1351         /*
1352          * Process group and session signals need to be delivered to just the
1353          * parent before the fork or both the parent and the child after the
1354          * fork. Restart if a signal comes in before we add the new process to
1355          * its process group.
1356          * A fatal signal pending means that current will exit, so the new
1357          * thread can't slip out of an OOM kill (or normal SIGKILL).
1358         */
1359         recalc_sigpending();
1360         if (signal_pending(current)) {
1361                 spin_unlock(&current->sighand->siglock);
1362                 write_unlock_irq(&tasklist_lock);
1363                 retval = -ERESTARTNOINTR;
1364                 goto bad_fork_free_pid;
1365         }
1366
1367         if (clone_flags & CLONE_THREAD) {
1368                 current->signal->nr_threads++;
1369                 atomic_inc(&current->signal->live);
1370                 atomic_inc(&current->signal->sigcnt);
1371                 p->group_leader = current->group_leader;
1372                 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1373         }
1374
1375         if (likely(p->pid)) {
1376                 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
1377
1378                 if (thread_group_leader(p)) {
1379                         if (is_child_reaper(pid))
1380                                 p->nsproxy->pid_ns->child_reaper = p;
1381
1382                         p->signal->leader_pid = pid;
1383                         p->signal->tty = tty_kref_get(current->signal->tty);
1384                         attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
1385                         attach_pid(p, PIDTYPE_SID, task_session(current));
1386                         list_add_tail(&p->sibling, &p->real_parent->children);
1387                         list_add_tail_rcu(&p->tasks, &init_task.tasks);
1388                         __this_cpu_inc(process_counts);
1389                 }
1390                 attach_pid(p, PIDTYPE_PID, pid);
1391                 nr_threads++;
1392         }
1393
1394         total_forks++;
1395         spin_unlock(&current->sighand->siglock);
1396         write_unlock_irq(&tasklist_lock);
1397         proc_fork_connector(p);
1398         cgroup_post_fork(p);
1399         if (clone_flags & CLONE_THREAD)
1400                 threadgroup_change_end(current);
1401         perf_event_fork(p);
1402
1403         trace_task_newtask(p, clone_flags);
1404
1405         return p;
1406
1407 bad_fork_free_pid:
1408         if (pid != &init_struct_pid)
1409                 free_pid(pid);
1410 bad_fork_cleanup_io:
1411         if (p->io_context)
1412                 exit_io_context(p);
1413 bad_fork_cleanup_namespaces:
1414         exit_task_namespaces(p);
1415 bad_fork_cleanup_mm:
1416         if (p->mm)
1417                 mmput(p->mm);
1418 bad_fork_cleanup_signal:
1419         if (!(clone_flags & CLONE_THREAD))
1420                 free_signal_struct(p->signal);
1421 bad_fork_cleanup_sighand:
1422         __cleanup_sighand(p->sighand);
1423 bad_fork_cleanup_fs:
1424         exit_fs(p); /* blocking */
1425 bad_fork_cleanup_files:
1426         exit_files(p); /* blocking */
1427 bad_fork_cleanup_semundo:
1428         exit_sem(p);
1429 bad_fork_cleanup_audit:
1430         audit_free(p);
1431 bad_fork_cleanup_policy:
1432         perf_event_free_task(p);
1433 #ifdef CONFIG_NUMA
1434         mpol_put(p->mempolicy);
1435 bad_fork_cleanup_cgroup:
1436 #endif
1437         if (clone_flags & CLONE_THREAD)
1438                 threadgroup_change_end(current);
1439         cgroup_exit(p, cgroup_callbacks_done);
1440         delayacct_tsk_free(p);
1441         module_put(task_thread_info(p)->exec_domain->module);
1442 bad_fork_cleanup_count:
1443         atomic_dec(&p->cred->user->processes);
1444         exit_creds(p);
1445 bad_fork_free:
1446         free_task(p);
1447 fork_out:
1448         return ERR_PTR(retval);
1449 }
1450
1451 noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
1452 {
1453         memset(regs, 0, sizeof(struct pt_regs));
1454         return regs;
1455 }
1456
1457 static inline void init_idle_pids(struct pid_link *links)
1458 {
1459         enum pid_type type;
1460
1461         for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
1462                 INIT_HLIST_NODE(&links[type].node); /* not really needed */
1463                 links[type].pid = &init_struct_pid;
1464         }
1465 }
1466
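/*
 * Create the idle task for @cpu: clone the current (boot) context with
 * CLONE_VM and a zeroed pt_regs, attach it to init_struct_pid and hand
 * it to the scheduler via init_idle().  Unlike a normal child it is
 * never woken up here.
 */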
1467 struct task_struct * __cpuinit fork_idle(int cpu)
1468 {
1469         struct task_struct *task;
1470         struct pt_regs regs;
1471
1472         task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
1473                             &init_struct_pid, 0);
1474         if (!IS_ERR(task)) {
1475                 init_idle_pids(task->pids);
1476                 init_idle(task, cpu);
1477         }
1478
1479         return task;
1480 }
1481
1482 /*
1483  *  Ok, this is the main fork-routine.
1484  *
1485  * It copies the process, and if successful kick-starts
1486  * it and waits for it to finish using the VM if required.
1487  */
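/*
 * Typical arch-level entry points (illustrative sketch only, not part of
 * this file; the exact register-passing details are architecture specific):
 *
 *      sys_fork(regs)  -> do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
 *      sys_vfork(regs) -> do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
 *                                 regs->sp, regs, 0, NULL, NULL);
 *      kernel_thread() -> do_fork(flags | CLONE_VM | CLONE_UNTRACED, ...);
 */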
1488 long do_fork(unsigned long clone_flags,
1489               unsigned long stack_start,
1490               struct pt_regs *regs,
1491               unsigned long stack_size,
1492               int __user *parent_tidptr,
1493               int __user *child_tidptr)
1494 {
1495         struct task_struct *p;
1496         int trace = 0;
1497         long nr;
1498
1499         /*
1500          * Do some preliminary argument and permissions checking before we
1501          * actually start allocating stuff
1502          */
1503         if (clone_flags & CLONE_NEWUSER) {
1504                 if (clone_flags & CLONE_THREAD)
1505                         return -EINVAL;
1506                 /* hopefully this check will go away when userns support is
1507                  * complete
1508                  */
1509                 if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
1510                                 !capable(CAP_SETGID))
1511                         return -EPERM;
1512         }
1513
1514         /*
1515          * Determine whether and which event to report to ptracer.  When
1516          * called from kernel_thread or CLONE_UNTRACED is explicitly
1517          * requested, no event is reported; otherwise, report if the event
1518          * for the type of forking is enabled.
1519          */
1520         if (likely(user_mode(regs)) && !(clone_flags & CLONE_UNTRACED)) {
1521                 if (clone_flags & CLONE_VFORK)
1522                         trace = PTRACE_EVENT_VFORK;
1523                 else if ((clone_flags & CSIGNAL) != SIGCHLD)
1524                         trace = PTRACE_EVENT_CLONE;
1525                 else
1526                         trace = PTRACE_EVENT_FORK;
1527
1528                 if (likely(!ptrace_event_enabled(current, trace)))
1529                         trace = 0;
1530         }
1531
1532         p = copy_process(clone_flags, stack_start, regs, stack_size,
1533                          child_tidptr, NULL, trace);
1534         /*
1535          * Do this prior to waking up the new thread - the thread pointer
1536          * might get invalid after that point, if the thread exits quickly.
1537          */
1538         if (!IS_ERR(p)) {
1539                 struct completion vfork;
1540
1541                 trace_sched_process_fork(current, p);
1542
1543                 nr = task_pid_vnr(p);
1544
1545                 if (clone_flags & CLONE_PARENT_SETTID)
1546                         put_user(nr, parent_tidptr);
1547
1548                 if (clone_flags & CLONE_VFORK) {
1549                         p->vfork_done = &vfork;
1550                         init_completion(&vfork);
1551                 }
1552
1553                 /*
1554                  * We set PF_STARTING at creation in case tracing wants to
1555                  * use this to distinguish a fully live task from one that
1556                  * hasn't finished SIGSTOP raising yet.  Now we clear it
1557                  * and set the child going.
1558                  */
1559                 p->flags &= ~PF_STARTING;
1560
1561                 wake_up_new_task(p);
1562
1563                 /* forking complete and child started to run, tell ptracer */
1564                 if (unlikely(trace))
1565                         ptrace_event(trace, nr);
1566
1567                 if (clone_flags & CLONE_VFORK) {
1568                         freezer_do_not_count();
1569                         wait_for_completion(&vfork);
1570                         freezer_count();
1571                         ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
1572                 }
1573         } else {
1574                 nr = PTR_ERR(p);
1575         }
1576         return nr;
1577 }
1578
1579 #ifndef ARCH_MIN_MMSTRUCT_ALIGN
1580 #define ARCH_MIN_MMSTRUCT_ALIGN 0
1581 #endif
1582
1583 static void sighand_ctor(void *data)
1584 {
1585         struct sighand_struct *sighand = data;
1586
1587         spin_lock_init(&sighand->siglock);
1588         init_waitqueue_head(&sighand->signalfd_wqh);
1589 }
1590
1591 void __init proc_caches_init(void)
1592 {
1593         sighand_cachep = kmem_cache_create("sighand_cache",
1594                         sizeof(struct sighand_struct), 0,
1595                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
1596                         SLAB_NOTRACK, sighand_ctor);
1597         signal_cachep = kmem_cache_create("signal_cache",
1598                         sizeof(struct signal_struct), 0,
1599                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1600         files_cachep = kmem_cache_create("files_cache",
1601                         sizeof(struct files_struct), 0,
1602                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1603         fs_cachep = kmem_cache_create("fs_cache",
1604                         sizeof(struct fs_struct), 0,
1605                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1606         /*
1607          * FIXME! The "sizeof(struct mm_struct)" currently includes the
1608          * whole struct cpumask for the OFFSTACK case. We could change
1609          * this to *only* allocate as much of it as required by the
1610          * maximum number of CPU's we can ever have.  The cpumask_allocation
1611          * maximum number of CPUs we can ever have.  The cpumask_allocation
1612          */
1613         mm_cachep = kmem_cache_create("mm_struct",
1614                         sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
1615                         SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
1616         vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
1617         mmap_init();
1618         nsproxy_cache_init();
1619 }
1620
1621 /*
1622  * Check constraints on flags passed to the unshare system call.
1623  */
1624 static int check_unshare_flags(unsigned long unshare_flags)
1625 {
1626         if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
1627                                 CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
1628                                 CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
1629                 return -EINVAL;
1630         /*
1631          * Not implemented, but pretend it works if there is nothing to
1632          * unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
1633          * needs to unshare vm.
1634          */
1635         if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
1636                 /* FIXME: get_task_mm() increments ->mm_users */
1637                 if (atomic_read(&current->mm->mm_users) > 1)
1638                         return -EINVAL;
1639         }
1640
1641         return 0;
1642 }
1643
1644 /*
1645  * Unshare the filesystem structure if it is being shared
1646  */
1647 static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
1648 {
1649         struct fs_struct *fs = current->fs;
1650
1651         if (!(unshare_flags & CLONE_FS) || !fs)
1652                 return 0;
1653
1654         /* don't need lock here; in the worst case we'll do useless copy */
1655         if (fs->users == 1)
1656                 return 0;
1657
1658         *new_fsp = copy_fs_struct(fs);
1659         if (!*new_fsp)
1660                 return -ENOMEM;
1661
1662         return 0;
1663 }
1664
1665 /*
1666  * Unshare file descriptor table if it is being shared
1667  */
1668 static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
1669 {
1670         struct files_struct *fd = current->files;
1671         int error = 0;
1672
1673         if ((unshare_flags & CLONE_FILES) &&
1674             (fd && atomic_read(&fd->count) > 1)) {
1675                 *new_fdp = dup_fd(fd, &error);
1676                 if (!*new_fdp)
1677                         return error;
1678         }
1679
1680         return 0;
1681 }
1682
1683 /*
1684  * unshare allows a process to 'unshare' part of the process
1685  * context which was originally shared using clone.  copy_*
1686  * functions used by do_fork() cannot be used here directly
1687  * because they modify an inactive task_struct that is being
1688  * constructed. Here we are modifying the current, active,
1689  * task_struct.
1690  */
1691 SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
1692 {
1693         struct fs_struct *fs, *new_fs = NULL;
1694         struct files_struct *fd, *new_fd = NULL;
1695         struct nsproxy *new_nsproxy = NULL;
1696         int do_sysvsem = 0;
1697         int err;
1698
1699         err = check_unshare_flags(unshare_flags);
1700         if (err)
1701                 goto bad_unshare_out;
1702
1703         /*
1704          * If unsharing namespace, must also unshare filesystem information.
1705          */
1706         if (unshare_flags & CLONE_NEWNS)
1707                 unshare_flags |= CLONE_FS;
1708         /*
1709          * CLONE_NEWIPC must also detach from the undolist: after switching
1710          * to a new ipc namespace, the semaphore arrays from the old
1711          * namespace are unreachable.
1712          */
1713         if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
1714                 do_sysvsem = 1;
1715         err = unshare_fs(unshare_flags, &new_fs);
1716         if (err)
1717                 goto bad_unshare_out;
1718         err = unshare_fd(unshare_flags, &new_fd);
1719         if (err)
1720                 goto bad_unshare_cleanup_fs;
1721         err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, new_fs);
1722         if (err)
1723                 goto bad_unshare_cleanup_fd;
1724
1725         if (new_fs || new_fd || do_sysvsem || new_nsproxy) {
1726                 if (do_sysvsem) {
1727                         /*
1728                          * CLONE_SYSVSEM is equivalent to sys_exit().
1729                          */
1730                         exit_sem(current);
1731                 }
1732
1733                 if (new_nsproxy) {
1734                         switch_task_namespaces(current, new_nsproxy);
1735                         new_nsproxy = NULL;
1736                 }
1737
1738                 task_lock(current);
1739
1740                 if (new_fs) {
1741                         fs = current->fs;
1742                         spin_lock(&fs->lock);
1743                         current->fs = new_fs;
1744                         if (--fs->users)
1745                                 new_fs = NULL;
1746                         else
1747                                 new_fs = fs;
1748                         spin_unlock(&fs->lock);
1749                 }
1750
1751                 if (new_fd) {
1752                         fd = current->files;
1753                         current->files = new_fd;
1754                         new_fd = fd;
1755                 }
1756
1757                 task_unlock(current);
1758         }
1759
1760         if (new_nsproxy)
1761                 put_nsproxy(new_nsproxy);
1762
1763 bad_unshare_cleanup_fd:
1764         if (new_fd)
1765                 put_files_struct(new_fd);
1766
1767 bad_unshare_cleanup_fs:
1768         if (new_fs)
1769                 free_fs_struct(new_fs);
1770
1771 bad_unshare_out:
1772         return err;
1773 }
1774
1775 /*
1776  *      Helper to unshare the files of the current task.
1777  *      We don't want to expose copy_files internals to
1778  *      the exec layer of the kernel.
1779  */
1780
1781 int unshare_files(struct files_struct **displaced)
1782 {
1783         struct task_struct *task = current;
1784         struct files_struct *copy = NULL;
1785         int error;
1786
1787         error = unshare_fd(CLONE_FILES, &copy);
1788         if (error || !copy) {
1789                 *displaced = NULL;
1790                 return error;
1791         }
1792         *displaced = task->files;
1793         task_lock(task);
1794         task->files = copy;
1795         task_unlock(task);
1796         return 0;
1797 }