/* kernel/task_work.c */
#include <linux/spinlock.h>
#include <linux/task_work.h>
#include <linux/tracehook.h>

/*
 * task_work_add - queue a work for @task to run in its own context
 * @task:   the task that should run @twork->func
 * @twork:  the work to queue; @twork->func must already be set
 * @notify: if true, also kick @task so it returns to user mode soon
 *
 * Returns 0 on success, -ESRCH if @task has already passed
 * exit_task_work(), or -ENOTSUPP if @notify was requested but the
 * architecture has no TIF_NOTIFY_RESUME.
 */
int
task_work_add(struct task_struct *task, struct task_work *twork, bool notify)
{
        unsigned long flags;
        int err = -ESRCH;

#ifndef TIF_NOTIFY_RESUME
        if (notify)
                return -ENOTSUPP;
#endif
        /*
         * We must not insert the new work if the task has already passed
         * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait()
         * and check PF_EXITING under pi_lock.
         */
        raw_spin_lock_irqsave(&task->pi_lock, flags);
        if (likely(!(task->flags & PF_EXITING))) {
                hlist_add_head(&twork->hlist, &task->task_works);
                err = 0;
        }
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        /* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
        if (likely(!err) && notify)
                set_notify_resume(task);
        return err;
}
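
/*
 * Illustrative usage sketch, not part of this file: queue a callback
 * to run in @task's context the next time it returns to user mode.
 * "struct my_deferred", "my_deferred_func" and "queue_my_deferred" are
 * hypothetical names, and <linux/slab.h> is assumed for kmalloc/kfree.
 * The container must stay allocated until the callback runs (or a
 * task_work_cancel() succeeds), so freeing inside the callback is the
 * natural pattern.
 */
struct my_deferred {                    /* hypothetical container */
        struct task_work twork;
        int payload;
};

static void my_deferred_func(struct task_work *twork)
{
        struct my_deferred *md =
                container_of(twork, struct my_deferred, twork);

        /* Runs in the target task's own process context. */
        kfree(md);
}

static int queue_my_deferred(struct task_struct *task, int payload)
{
        struct my_deferred *md = kmalloc(sizeof(*md), GFP_KERNEL);
        int err;

        if (!md)
                return -ENOMEM;
        md->payload = payload;
        md->twork.func = my_deferred_func;
        err = task_work_add(task, &md->twork, true);
        if (err)
                kfree(md);      /* task exiting, or no TIF_NOTIFY_RESUME */
        return err;
}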

/*
 * task_work_cancel - remove a queued work before it runs
 * @task: the task the work was queued on
 * @func: the callback identifying the work
 *
 * Returns the most recently added work whose callback is @func, after
 * unlinking it, or NULL if no such work is pending. A non-NULL return
 * guarantees the callback will not run, so the caller owns the memory
 * again.
 */
struct task_work *
task_work_cancel(struct task_struct *task, task_work_func_t func)
{
        unsigned long flags;
        struct task_work *twork;
        struct hlist_node *pos;

        raw_spin_lock_irqsave(&task->pi_lock, flags);
        hlist_for_each_entry(twork, pos, &task->task_works, hlist) {
                if (twork->func == func) {
                        hlist_del(&twork->hlist);
                        goto found;
                }
        }
        twork = NULL;
 found:
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);

        return twork;
}
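
/*
 * Illustrative pairing with the sketch above, not part of this file:
 * cancel a pending my_deferred before tearing down its owner. If
 * task_work_cancel() returns non-NULL, the callback never ran and
 * never will, so the container can be freed here.
 */
static void cancel_my_deferred(struct task_struct *task)
{
        struct task_work *twork = task_work_cancel(task, my_deferred_func);

        if (twork)
                kfree(container_of(twork, struct my_deferred, twork));
}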

/*
 * task_work_run - run and clear every work queued on the current task
 *
 * Called on the return-to-user-mode path via tracehook_notify_resume()
 * and on the exit path via exit_task_work(). The works run in the
 * order they were added (FIFO).
 */
void task_work_run(void)
{
        struct task_struct *task = current;
        struct hlist_head task_works;
        struct hlist_node *pos;

        raw_spin_lock_irq(&task->pi_lock);
        hlist_move_list(&task->task_works, &task_works);
        raw_spin_unlock_irq(&task->pi_lock);

        if (unlikely(hlist_empty(&task_works)))
                return;
        /*
         * We use an hlist to save space in task_struct, but we want
         * FIFO ordering. Find the last entry (the list should be
         * short), then process the entries in reverse order.
         */
        for (pos = task_works.first; pos->next; pos = pos->next)
                ;

        for (;;) {
                /* Save the back link before func() possibly frees pos. */
                struct hlist_node **pprev = pos->pprev;
                struct task_work *twork = container_of(pos, struct task_work,
                                                        hlist);
                twork->func(twork);

                if (pprev == &task_works.first)
                        break;
                /* pprev points at the previous node's ->next field. */
                pos = container_of(pprev, struct hlist_node, next);
        }
}
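
/*
 * For reference, a sketch of the exit-side hook; the real inline
 * helper lives in <linux/task_work.h>, so treat this reproduction as
 * an approximation. do_exit() sets PF_EXITING, then does
 * raw_spin_unlock_wait(&tsk->pi_lock) so that any racing
 * task_work_add() has either completed or seen PF_EXITING, and only
 * then flushes the remaining queue via this helper.
 */
static inline void exit_task_work(struct task_struct *task)
{
        if (unlikely(!hlist_empty(&task->task_works)))
                task_work_run();
}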