kernel/cgroup/debug.c
/*
 * Debug controller
 *
 * WARNING: This controller is for cgroup core debugging only.
 * Its interfaces are unstable and subject to changes at any time.
 */
#include <linux/ctype.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include "cgroup-internal.h"

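/*
 * Allocate a bare css for the debug controller; it carries no state of
 * its own beyond the generic cgroup_subsys_state.
 */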
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
        struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

        if (!css)
                return ERR_PTR(-ENOMEM);

        return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
        kfree(css);
}

/*
 * debug_taskcount_read - return the number of tasks in a cgroup.
 * @css: the css of the cgroup in question
 * @cft: the cftype being read (unused)
 */
static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
                                struct cftype *cft)
{
        return cgroup_task_count(css->cgroup);
}

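/*
 * Show the css_set of the reading task: its kernel address, its reference
 * count (plus any references not accounted to member tasks), and the css
 * of each subsystem attached to it.
 */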
static int current_css_set_read(struct seq_file *seq, void *v)
{
        struct kernfs_open_file *of = seq->private;
        struct css_set *cset;
        struct cgroup_subsys *ss;
        struct cgroup_subsys_state *css;
        int i, refcnt;

        if (!cgroup_kn_lock_live(of->kn, false))
                return -ENODEV;

        spin_lock_irq(&css_set_lock);
        rcu_read_lock();
        cset = rcu_dereference(current->cgroups);
        refcnt = refcount_read(&cset->refcount);
        seq_printf(seq, "css_set %pK %d", cset, refcnt);
        if (refcnt > cset->nr_tasks)
                seq_printf(seq, " +%d", refcnt - cset->nr_tasks);
        seq_puts(seq, "\n");

        /*
         * Print the css'es stored in the current css_set.
         */
        for_each_subsys(ss, i) {
                css = cset->subsys[ss->id];
                if (!css)
                        continue;
                seq_printf(seq, "%2d: %-4s\t- %lx[%d]\n", ss->id, ss->name,
                          (unsigned long)css, css->id);
        }
        rcu_read_unlock();
        spin_unlock_irq(&css_set_lock);
        cgroup_kn_unlock(of->kn);
        return 0;
}

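/*
 * Report the raw reference count of the reading task's css_set.
 */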
static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
                                         struct cftype *cft)
{
        u64 count;

        rcu_read_lock();
        count = refcount_read(&task_css_set(current)->refcount);
        rcu_read_unlock();
        return count;
}

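/*
 * For each cgrp_cset_link hanging off the reading task's css_set, print
 * the hierarchy ID and name of the linked cgroup.
 */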
static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
        struct cgrp_cset_link *link;
        struct css_set *cset;
        char *name_buf;

        name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
        if (!name_buf)
                return -ENOMEM;

        spin_lock_irq(&css_set_lock);
        rcu_read_lock();
        cset = rcu_dereference(current->cgroups);
        list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
                struct cgroup *c = link->cgrp;

                cgroup_name(c, name_buf, NAME_MAX + 1);
                seq_printf(seq, "Root %d group %s\n",
                           c->root->hierarchy_id, name_buf);
        }
        rcu_read_unlock();
        spin_unlock_irq(&css_set_lock);
        kfree(name_buf);
        return 0;
}

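/*
 * Dump the css_sets linked to this css's cgroup: each css_set's reference
 * count (with any refs beyond its task count flagged as "+N"), up to
 * MAX_TASKS_SHOWN_PER_CSS of its member tasks, and a [dead] marker where
 * applicable, followed by totals for extra references and dead css_sets.
 */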
#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
        struct cgroup_subsys_state *css = seq_css(seq);
        struct cgrp_cset_link *link;
        int dead_cnt = 0, extra_refs = 0;

        spin_lock_irq(&css_set_lock);
        list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
                struct css_set *cset = link->cset;
                struct task_struct *task;
                int count = 0;
                int refcnt = refcount_read(&cset->refcount);

                seq_printf(seq, " %d", refcnt);
                if (refcnt - cset->nr_tasks > 0) {
                        int extra = refcnt - cset->nr_tasks;

                        seq_printf(seq, " +%d", extra);
                        /*
                         * Take out the one additional reference in
                         * init_css_set.
                         */
                        if (cset == &init_css_set)
                                extra--;
                        extra_refs += extra;
                }
                seq_puts(seq, "\n");

                list_for_each_entry(task, &cset->tasks, cg_list) {
                        if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
                                seq_printf(seq, "  task %d\n",
                                           task_pid_vnr(task));
                }

                list_for_each_entry(task, &cset->mg_tasks, cg_list) {
                        if (count++ <= MAX_TASKS_SHOWN_PER_CSS)
                                seq_printf(seq, "  task %d\n",
                                           task_pid_vnr(task));
                }
                /* show # of overflowed tasks */
                if (count > MAX_TASKS_SHOWN_PER_CSS)
                        seq_printf(seq, "  ... (%d)\n",
                                   count - MAX_TASKS_SHOWN_PER_CSS);

                if (cset->dead) {
                        seq_puts(seq, "    [dead]\n");
                        dead_cnt++;
                }

                WARN_ON(count != cset->nr_tasks);
        }
        spin_unlock_irq(&css_set_lock);

        if (!dead_cnt && !extra_refs)
                return 0;

        seq_puts(seq, "\n");
        if (extra_refs)
                seq_printf(seq, "extra references = %d\n", extra_refs);
        if (dead_cnt)
                seq_printf(seq, "dead css_sets = %d\n", dead_cnt);

        return 0;
}

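/*
 * For each subsystem enabled on this cgroup, print the subsystem ID and
 * name along with its css address, css ID, online count, and the ID of
 * its parent css when one exists.
 */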
static int cgroup_subsys_states_read(struct seq_file *seq, void *v)
{
        struct kernfs_open_file *of = seq->private;
        struct cgroup *cgrp;
        struct cgroup_subsys *ss;
        struct cgroup_subsys_state *css;
        char pbuf[16];
        int i;

        cgrp = cgroup_kn_lock_live(of->kn, false);
        if (!cgrp)
                return -ENODEV;

        for_each_subsys(ss, i) {
                css = rcu_dereference_check(cgrp->subsys[ss->id], true);
                if (!css)
                        continue;

                pbuf[0] = '\0';

                /* Show the parent CSS if applicable */
                if (css->parent)
                        snprintf(pbuf, sizeof(pbuf) - 1, " P=%d",
                                 css->parent->id);
                seq_printf(seq, "%2d: %-4s\t- %lx[%d] %d%s\n", ss->id, ss->name,
                          (unsigned long)css, css->id,
                          atomic_read(&css->online_cnt), pbuf);
        }

        cgroup_kn_unlock(of->kn);
        return 0;
}

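/*
 * Print one named subsystem mask as a comma-separated list of the
 * controllers whose bits are set.
 */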
static void cgroup_masks_read_one(struct seq_file *seq, const char *name,
                                  u16 mask)
{
        struct cgroup_subsys *ss;
        int ssid;
        bool first = true;

        seq_printf(seq, "%-17s: ", name);
        for_each_subsys(ss, ssid) {
                if (!(mask & (1 << ssid)))
                        continue;
                if (!first)
                        seq_puts(seq, ", ");
                seq_puts(seq, ss->name);
                first = false;
        }
        seq_putc(seq, '\n');
}

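/*
 * Show the cgroup's subtree_control and subtree_ss_mask controller masks.
 */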
static int cgroup_masks_read(struct seq_file *seq, void *v)
{
        struct kernfs_open_file *of = seq->private;
        struct cgroup *cgrp;

        cgrp = cgroup_kn_lock_live(of->kn, false);
        if (!cgrp)
                return -ENODEV;

        cgroup_masks_read_one(seq, "subtree_control", cgrp->subtree_control);
        cgroup_masks_read_one(seq, "subtree_ss_mask", cgrp->subtree_ss_mask);

        cgroup_kn_unlock(of->kn);
        return 0;
}

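/*
 * A cgroup reads as releasable when it is not populated with tasks and
 * has no online child csses.
 */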
static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
        return (!cgroup_is_populated(css->cgroup) &&
                !css_has_online_children(&css->cgroup->self));
}

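/* Interface files exposed on cgroup v1 (legacy) hierarchies. */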
static struct cftype debug_legacy_files[] = {
        {
                .name = "taskcount",
                .read_u64 = debug_taskcount_read,
        },

        {
                .name = "current_css_set",
                .seq_show = current_css_set_read,
                .flags = CFTYPE_ONLY_ON_ROOT,
        },

        {
                .name = "current_css_set_refcount",
                .read_u64 = current_css_set_refcount_read,
                .flags = CFTYPE_ONLY_ON_ROOT,
        },

        {
                .name = "current_css_set_cg_links",
                .seq_show = current_css_set_cg_links_read,
                .flags = CFTYPE_ONLY_ON_ROOT,
        },

        {
                .name = "cgroup_css_links",
                .seq_show = cgroup_css_links_read,
        },

        {
                .name = "cgroup_subsys_states",
                .seq_show = cgroup_subsys_states_read,
        },

        {
                .name = "cgroup_masks",
                .seq_show = cgroup_masks_read,
        },

        {
                .name = "releasable",
                .read_u64 = releasable_read,
        },

        { }     /* terminate */
};

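/* Interface files exposed on the cgroup v2 (default) hierarchy. */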
static struct cftype debug_files[] = {
        {
                .name = "taskcount",
                .read_u64 = debug_taskcount_read,
        },

        {
                .name = "current_css_set",
                .seq_show = current_css_set_read,
                .flags = CFTYPE_ONLY_ON_ROOT,
        },

        {
                .name = "current_css_set_refcount",
                .read_u64 = current_css_set_refcount_read,
                .flags = CFTYPE_ONLY_ON_ROOT,
        },

        {
                .name = "current_css_set_cg_links",
                .seq_show = current_css_set_cg_links_read,
                .flags = CFTYPE_ONLY_ON_ROOT,
        },

        {
                .name = "css_links",
                .seq_show = cgroup_css_links_read,
        },

        {
                .name = "csses",
                .seq_show = cgroup_subsys_states_read,
        },

        {
                .name = "masks",
                .seq_show = cgroup_masks_read,
        },

        { }     /* terminate */
};

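/*
 * The debug controller registers only its legacy (v1) files by default;
 * enable_cgroup_debug() below wires up the v2 files and makes the
 * controller implicit on the default hierarchy.
 */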
struct cgroup_subsys debug_cgrp_subsys = {
        .css_alloc      = debug_css_alloc,
        .css_free       = debug_css_free,
        .legacy_cftypes = debug_legacy_files,
};

/*
 * On v2, debug is an implicit controller enabled by the "cgroup_debug"
 * boot parameter.
 */
static int __init enable_cgroup_debug(char *str)
{
        debug_cgrp_subsys.dfl_cftypes = debug_files;
        debug_cgrp_subsys.implicit_on_dfl = true;
        return 1;
}
__setup("cgroup_debug", enable_cgroup_debug);
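
/*
 * Example usage (paths and mount points below are illustrative, not
 * mandated by this file):
 *
 *   v1: mount a hierarchy with the debug controller and read its files,
 *       e.g. "mount -t cgroup -o debug none /mnt" followed by
 *       "cat /mnt/debug.cgroup_css_links".
 *
 *   v2: boot with "cgroup_debug" so the controller is implicitly enabled
 *       on the default hierarchy, then read the debug.* files in a cgroup
 *       directory, e.g. "cat /sys/fs/cgroup/debug.csses".
 */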