#include "audit.h"
#include <linux/fsnotify_backend.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>
	struct audit_chunk *root;
	struct list_head chunks;
	struct list_head rules;
	struct list_head list;
	struct list_head same_root;

	struct list_head hash;
	struct fsnotify_mark mark;
	struct list_head trees;		/* with root here */

		struct list_head list;
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);
static struct task_struct *prune_thread;
/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded fsnotify_mark + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference. Some.
 */
static struct fsnotify_group *audit_tree_group;
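/*
 * Allocate an audit_tree for one watched path; the pathname is copied
 * into the trailing array and the caller receives the initial reference
 * on tree->count.
 */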
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}
static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}
static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		kfree_rcu(tree, head);
}
/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}
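/*
 * free_chunk() drops the tree references held by the chunk's owner
 * slots; audit_put_chunk() invokes it once the last chunk->refs
 * reference is gone.
 */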
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}
void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}
static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}
static void audit_tree_destroy_watch(struct fsnotify_mark *entry)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);
	call_rcu(&chunk->head, __put_chunk);
}
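/*
 * Allocate a chunk with room for @count owner slots.  The embedded
 * fsnotify mark gets audit_tree_destroy_watch() as its destructor, so
 * dropping the final mark reference frees the chunk via RCU.
 */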
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
	chunk->mark.mask = FS_IN_IGNORED;
	return chunk;
}
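/*
 * All live chunks are kept in a global hash keyed by the address of the
 * watched inode; lookups run under RCU, modifications under hash_lock.
 */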
enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
/* Function to return search key in our hash from inode. */
static unsigned long inode_to_key(const struct inode *inode)
{
	return (unsigned long)inode;
}
/*
 * Function to return search key in our hash from chunk. Key 0 is special and
 * should never be present in the hash.
 */
static unsigned long chunk_to_key(struct audit_chunk *chunk)
{
	/*
	 * We have a reference to the mark so it should be attached to a
	 * connector.
	 */
	if (WARN_ON_ONCE(!chunk->mark.connector))
		return 0;
	return (unsigned long)chunk->mark.connector->inode;
}
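/*
 * Hash on the "middle bits" of the inode address: dividing by
 * L1_CACHE_BYTES discards the low-order bits, which carry little
 * entropy for slab-allocated inodes, before reducing modulo HASH_SIZE.
 */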
static inline struct list_head *chunk_hash(unsigned long key)
{
	unsigned long n = key / L1_CACHE_BYTES;
	return chunk_hash_heads + n % HASH_SIZE;
}
/* hash_lock & entry->lock are held by caller */
static void insert_hash(struct audit_chunk *chunk)
{
	unsigned long key = chunk_to_key(chunk);
	struct list_head *list;

	if (!(chunk->mark.flags & FSNOTIFY_MARK_FLAG_ATTACHED))
		return;
	list = chunk_hash(key);
	list_add_rcu(&chunk->hash, list);
}
/* called under rcu_read_lock */
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
	unsigned long key = inode_to_key(inode);
	struct list_head *list = chunk_hash(key);
	struct audit_chunk *p;

	list_for_each_entry_rcu(p, list, hash) {
		if (chunk_to_key(p) == key) {
			atomic_long_inc(&p->refs);
			return p;
		}
	}
	return NULL;
}
bool audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree)
{
	int n;
	for (n = 0; n < chunk->count; n++)
		if (chunk->owners[n].owner == tree)
			return true;
	return false;
}
/* tagging and untagging inodes with trees */
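/*
 * An owner slot can be mapped back to its chunk because the low bits of
 * node->index record the slot's position in chunk->owners[]; stepping
 * back that many entries lands on owners[0].
 */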
static struct audit_chunk *find_chunk(struct node *p)
{
	int index = p->index & ~(1U<<31);
	p -= index;
	return container_of(p, struct audit_chunk, owners[0]);
}
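/*
 * Remove the tagging represented by owner slot @p: the chunk is swapped
 * for a copy with one slot fewer (or torn down completely when @p was
 * the last owner).  Called with hash_lock held; the lock is dropped and
 * re-acquired around the fsnotify work.
 */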
static void untag_chunk(struct node *p)
	struct audit_chunk *chunk = find_chunk(p);
	struct fsnotify_mark *entry = &chunk->mark;
	struct audit_chunk *new = NULL;
	struct audit_tree *owner;
	int size = chunk->count - 1;

	fsnotify_get_mark(entry);

	spin_unlock(&hash_lock);

		new = alloc_chunk(size);

	mutex_lock(&entry->group->mark_mutex);
	spin_lock(&entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->inode getting NULL.
	 */
	if (chunk->dead || !(entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);

		spin_lock(&hash_lock);
		list_del_init(&chunk->trees);
		if (owner->root == chunk)
		list_del_init(&p->list);
		list_del_rcu(&chunk->hash);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		mutex_unlock(&entry->group->mark_mutex);
		fsnotify_destroy_mark(entry, audit_tree_group);

	if (fsnotify_add_mark_locked(&new->mark, entry->group,
				     entry->connector->inode, NULL, 1)) {
		fsnotify_put_mark(&new->mark);

	spin_lock(&hash_lock);
	list_replace_init(&chunk->trees, &new->trees);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);

	for (i = j = 0; j <= size; i++, j++) {
		struct audit_tree *s;
		if (&chunk->owners[j] == p) {
			list_del_init(&p->list);
		s = chunk->owners[j].owner;
		new->owners[i].owner = s;
		new->owners[i].index = chunk->owners[j].index - j + i;
		if (!s) /* result of earlier fallback */
		list_replace_init(&chunk->owners[j].list, &new->owners[i].list);

	list_replace_rcu(&chunk->hash, &new->hash);
	list_for_each_entry(owner, &new->trees, same_root)
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);
	fsnotify_destroy_mark(entry, audit_tree_group);
	fsnotify_put_mark(&new->mark);	/* drop initial reference */

	// do the best we can
	spin_lock(&hash_lock);
	if (owner->root == chunk) {
		list_del_init(&owner->same_root);
	list_del_init(&p->list);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	mutex_unlock(&entry->group->mark_mutex);

	fsnotify_put_mark(entry);
	spin_lock(&hash_lock);
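/*
 * First tag on an inode: allocate a single-owner chunk, attach its
 * fsnotify mark to the inode and hook the chunk into both the tree and
 * the hash.
 */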
static int create_chunk(struct inode *inode, struct audit_tree *tree)
	struct fsnotify_mark *entry;
	struct audit_chunk *chunk = alloc_chunk(1);

	entry = &chunk->mark;
	if (fsnotify_add_mark(entry, audit_tree_group, inode, NULL, 0)) {
		fsnotify_put_mark(entry);

	spin_lock(&entry->lock);
	spin_lock(&hash_lock);
		spin_unlock(&hash_lock);
		spin_unlock(&entry->lock);
		fsnotify_destroy_mark(entry, audit_tree_group);
		fsnotify_put_mark(entry);

	chunk->owners[0].index = (1U << 31);
	chunk->owners[0].owner = tree;
	list_add(&chunk->owners[0].list, &tree->chunks);

		list_add(&tree->same_root, &chunk->trees);
	spin_unlock(&hash_lock);
	spin_unlock(&entry->lock);
	fsnotify_put_mark(entry);	/* drop initial reference */
/* the first tagged inode becomes root of tree */
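/*
 * Tag @inode with @tree.  If the inode already carries a chunk, that
 * chunk is replaced by a copy with one extra owner slot; otherwise a
 * fresh single-owner chunk is created.
 */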
static int tag_chunk(struct inode *inode, struct audit_tree *tree)
	struct fsnotify_mark *old_entry, *chunk_entry;
	struct audit_tree *owner;
	struct audit_chunk *chunk, *old;

	old_entry = fsnotify_find_inode_mark(audit_tree_group, inode);
		return create_chunk(inode, tree);

	old = container_of(old_entry, struct audit_chunk, mark);

	/* are we already there? */
	spin_lock(&hash_lock);
	for (n = 0; n < old->count; n++) {
		if (old->owners[n].owner == tree) {
			spin_unlock(&hash_lock);
			fsnotify_put_mark(old_entry);
	spin_unlock(&hash_lock);

	chunk = alloc_chunk(old->count + 1);
		fsnotify_put_mark(old_entry);

	chunk_entry = &chunk->mark;

	mutex_lock(&old_entry->group->mark_mutex);
	spin_lock(&old_entry->lock);
	/*
	 * mark_mutex protects mark from getting detached and thus also from
	 * mark->connector->inode getting NULL.
	 */
	if (!(old_entry->flags & FSNOTIFY_MARK_FLAG_ATTACHED)) {
		/* old_entry is being shot, lets just lie */
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(old_entry);

	if (fsnotify_add_mark_locked(chunk_entry, old_entry->group,
				     old_entry->connector->inode, NULL, 1)) {
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);
		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);

	/* even though we hold old_entry->lock, this is safe since
	 * chunk_entry->lock could NEVER have been grabbed before */
	spin_lock(&chunk_entry->lock);
	spin_lock(&hash_lock);

	/* we now hold old_entry->lock, chunk_entry->lock, and hash_lock */
		spin_unlock(&hash_lock);
		spin_unlock(&chunk_entry->lock);
		spin_unlock(&old_entry->lock);
		mutex_unlock(&old_entry->group->mark_mutex);

		fsnotify_destroy_mark(chunk_entry, audit_tree_group);

		fsnotify_put_mark(chunk_entry);
		fsnotify_put_mark(old_entry);

	list_replace_init(&old->trees, &chunk->trees);
	for (n = 0, p = chunk->owners; n < old->count; n++, p++) {
		struct audit_tree *s = old->owners[n].owner;
		p->index = old->owners[n].index;
		if (!s) /* result of fallback in untag */
		list_replace_init(&old->owners[n].list, &p->list);
	p->index = (chunk->count - 1) | (1U<<31);
	list_add(&p->list, &tree->chunks);
	list_replace_rcu(&old->hash, &chunk->hash);
	list_for_each_entry(owner, &chunk->trees, same_root)

		list_add(&tree->same_root, &chunk->trees);
	spin_unlock(&hash_lock);
	spin_unlock(&chunk_entry->lock);
	spin_unlock(&old_entry->lock);
	mutex_unlock(&old_entry->group->mark_mutex);
	fsnotify_destroy_mark(old_entry, audit_tree_group);
	fsnotify_put_mark(chunk_entry);	/* drop initial reference */
	fsnotify_put_mark(old_entry);	/* pair to fsnotify_find mark_entry */
static void audit_tree_log_remove_rule(struct audit_krule *rule)
{
	struct audit_buffer *ab;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
	if (unlikely(!ab))
		return;
	audit_log_format(ab, "op=remove_rule");
	audit_log_format(ab, " dir=");
	audit_log_untrustedstring(ab, rule->tree->pathname);
	audit_log_key(ab, rule->filterkey);
	audit_log_format(ab, " list=%d res=1", rule->listnr);
	audit_log_end(ab);
}
static void kill_rules(struct audit_tree *tree)
{
	struct audit_krule *rule, *next;
	struct audit_entry *entry;

	list_for_each_entry_safe(rule, next, &tree->rules, rlist) {
		entry = container_of(rule, struct audit_entry, rule);

		list_del_init(&rule->rlist);
		if (rule->tree) {
			/* not a half-baked one */
			audit_tree_log_remove_rule(rule);
			if (entry->rule.exe)
				audit_remove_mark(entry->rule.exe);
			rule->tree = NULL;
			list_del_rcu(&entry->list);
			list_del(&entry->rule.list);
			call_rcu(&entry->rcu, audit_free_rule_rcu);
		}
	}
}
/* finish killing struct audit_tree */
static void prune_one(struct audit_tree *victim)
{
	spin_lock(&hash_lock);
	while (!list_empty(&victim->chunks)) {
		struct node *p;

		p = list_entry(victim->chunks.next, struct node, list);

		untag_chunk(p);
	}
	spin_unlock(&hash_lock);
	put_tree(victim);
}
/* trim the uncommitted chunks from tree */
static void trim_marked(struct audit_tree *tree)
	struct list_head *p, *q;
	spin_lock(&hash_lock);
		spin_unlock(&hash_lock);

	for (p = tree->chunks.next; p != &tree->chunks; p = q) {
		struct node *node = list_entry(p, struct node, list);
		if (node->index & (1U<<31)) {
			list_add(p, &tree->chunks);

	while (!list_empty(&tree->chunks)) {
		node = list_entry(tree->chunks.next, struct node, list);

		/* have we run out of marked? */
		if (!(node->index & (1U<<31)))

	if (!tree->root && !tree->goner) {
		spin_unlock(&hash_lock);
		mutex_lock(&audit_filter_mutex);
		list_del_init(&tree->list);
		mutex_unlock(&audit_filter_mutex);

		spin_unlock(&hash_lock);
static void audit_schedule_prune(void);

/* called with audit_filter_mutex */
int audit_remove_tree_rule(struct audit_krule *rule)
	struct audit_tree *tree;

		spin_lock(&hash_lock);
		list_del_init(&rule->rlist);
		if (list_empty(&tree->rules) && !tree->goner) {
			list_del_init(&tree->same_root);
			list_move(&tree->list, &prune_list);
			spin_unlock(&hash_lock);
			audit_schedule_prune();

		spin_unlock(&hash_lock);
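/*
 * iterate_mounts() callback: does this mount's root inode hash to the
 * key passed in @arg?
 */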
static int compare_root(struct vfsmount *mnt, void *arg)
{
	return inode_to_key(d_backing_inode(mnt->mnt_root)) ==
	       (unsigned long)arg;
}
void audit_trim_trees(void)
	struct list_head cursor;

	mutex_lock(&audit_filter_mutex);
	list_add(&cursor, &tree_list);
	while (cursor.next != &tree_list) {
		struct audit_tree *tree;
		struct vfsmount *root_mnt;

		tree = container_of(cursor.next, struct audit_tree, list);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path);

		root_mnt = collect_mounts(&path);
		if (IS_ERR(root_mnt))

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list) {
			struct audit_chunk *chunk = find_chunk(node);
			/* this could be NULL if the watch is dying elsewhere... */
			node->index |= 1U<<31;
			if (iterate_mounts(compare_root,
					   (void *)chunk_to_key(chunk),
					   root_mnt))
				node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);
		drop_collected_mounts(root_mnt);
		mutex_lock(&audit_filter_mutex);

	mutex_unlock(&audit_filter_mutex);
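/*
 * Validate a new tree rule and attach a freshly allocated audit_tree to
 * it; only exit-list rules on absolute paths qualify.
 */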
int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op)
{
	if (pathname[0] != '/' ||
	    rule->listnr != AUDIT_FILTER_EXIT ||
	    op != Audit_equal ||
	    rule->inode_f || rule->watch || rule->tree)
		return -EINVAL;
	rule->tree = alloc_tree(pathname);
	if (!rule->tree)
		return -ENOMEM;
	return 0;
}
void audit_put_tree(struct audit_tree *tree)
{
	put_tree(tree);
}
static int tag_mount(struct vfsmount *mnt, void *arg)
{
	return tag_chunk(d_backing_inode(mnt->mnt_root), arg);
}
/*
 * That gets run when evict_chunk() ends up needing to kill audit_tree.
 * Runs from a separate thread.
 */
static int prune_tree_thread(void *unused)
		if (list_empty(&prune_list)) {
			set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&audit_cmd_mutex);
		mutex_lock(&audit_filter_mutex);

		while (!list_empty(&prune_list)) {
			struct audit_tree *victim;

			victim = list_entry(prune_list.next,
					struct audit_tree, list);
			list_del_init(&victim->list);

			mutex_unlock(&audit_filter_mutex);

			mutex_lock(&audit_filter_mutex);

		mutex_unlock(&audit_filter_mutex);
		mutex_unlock(&audit_cmd_mutex);
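/*
 * Start the prune thread lazily, the first time a tree rule needs it.
 */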
static int audit_launch_prune(void)
{
	if (prune_thread)
		return 0;
	prune_thread = kthread_run(prune_tree_thread, NULL,
				   "audit_prune_tree");
	if (IS_ERR(prune_thread)) {
		pr_err("cannot start thread audit_prune_tree");
		prune_thread = NULL;
		return -ENOMEM;
	}
	return 0;
}
/* called with audit_filter_mutex */
int audit_add_tree_rule(struct audit_krule *rule)
	struct audit_tree *seed = rule->tree, *tree;
	struct vfsmount *mnt;

	list_for_each_entry(tree, &tree_list, list) {
		if (!strcmp(seed->pathname, tree->pathname)) {
			list_add(&rule->rlist, &tree->rules);

	list_add(&tree->list, &tree_list);
	list_add(&rule->rlist, &tree->rules);
	/* do not set rule->tree yet */
	mutex_unlock(&audit_filter_mutex);

	if (unlikely(!prune_thread)) {
		err = audit_launch_prune();

	err = kern_path(tree->pathname, 0, &path);
	mnt = collect_mounts(&path);

	err = iterate_mounts(tag_mount, tree, mnt);
	drop_collected_mounts(mnt);

		spin_lock(&hash_lock);
		list_for_each_entry(node, &tree->chunks, list)
			node->index &= ~(1U<<31);
		spin_unlock(&hash_lock);

	mutex_lock(&audit_filter_mutex);
	if (list_empty(&rule->rlist)) {

	mutex_lock(&audit_filter_mutex);
	list_del_init(&tree->list);
	list_del_init(&tree->rules);
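/*
 * For every tree whose watched directory covers @old, additionally tag
 * the mounts collected at @new, so that both locations fall under the
 * same rules.
 */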
int audit_tag_tree(char *old, char *new)
	struct list_head cursor, barrier;
	struct path path1, path2;
	struct vfsmount *tagged;

	err = kern_path(new, 0, &path2);
	tagged = collect_mounts(&path2);
		return PTR_ERR(tagged);

	err = kern_path(old, 0, &path1);
		drop_collected_mounts(tagged);

	mutex_lock(&audit_filter_mutex);
	list_add(&barrier, &tree_list);
	list_add(&cursor, &barrier);

	while (cursor.next != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(cursor.next, struct audit_tree, list);
		list_add(&cursor, &tree->list);
		mutex_unlock(&audit_filter_mutex);

		err = kern_path(tree->pathname, 0, &path2);
			good_one = path_is_under(&path1, &path2);

			mutex_lock(&audit_filter_mutex);

		failed = iterate_mounts(tag_mount, tree, tagged);
			mutex_lock(&audit_filter_mutex);

		mutex_lock(&audit_filter_mutex);
		spin_lock(&hash_lock);
			list_del(&tree->list);
			list_add(&tree->list, &tree_list);
		spin_unlock(&hash_lock);

	while (barrier.prev != &tree_list) {
		struct audit_tree *tree;

		tree = container_of(barrier.prev, struct audit_tree, list);
		list_del(&tree->list);
		list_add(&tree->list, &barrier);
		mutex_unlock(&audit_filter_mutex);

			spin_lock(&hash_lock);
			list_for_each_entry(node, &tree->chunks, list)
				node->index &= ~(1U<<31);
			spin_unlock(&hash_lock);

		mutex_lock(&audit_filter_mutex);

	mutex_unlock(&audit_filter_mutex);
	drop_collected_mounts(tagged);
static void audit_schedule_prune(void)
{
	wake_up_process(prune_thread);
}
/*
 * ... and that one is done if evict_chunk() decides to delay until the end
 * of syscall. Runs synchronously.
 */
void audit_kill_trees(struct list_head *list)
	mutex_lock(&audit_cmd_mutex);
	mutex_lock(&audit_filter_mutex);

	while (!list_empty(list)) {
		struct audit_tree *victim;

		victim = list_entry(list->next, struct audit_tree, list);
		list_del_init(&victim->list);

		mutex_unlock(&audit_filter_mutex);

		mutex_lock(&audit_filter_mutex);

	mutex_unlock(&audit_filter_mutex);
	mutex_unlock(&audit_cmd_mutex);
/*
 * Here comes the stuff asynchronous to auditctl operations
 */
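/*
 * Called when the watched inode goes away: detach the chunk from its
 * trees and hand the orphaned trees either to the prune thread or, when
 * running inside a syscall, to the caller's postponed list.
 */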
static void evict_chunk(struct audit_chunk *chunk)
	struct audit_tree *owner;
	struct list_head *postponed = audit_killed_trees();

	mutex_lock(&audit_filter_mutex);
	spin_lock(&hash_lock);
	while (!list_empty(&chunk->trees)) {
		owner = list_entry(chunk->trees.next,
				   struct audit_tree, same_root);
		list_del_init(&owner->same_root);
		spin_unlock(&hash_lock);
			list_move(&owner->list, &prune_list);
			list_move(&owner->list, postponed);
		spin_lock(&hash_lock);

	list_del_rcu(&chunk->hash);
	for (n = 0; n < chunk->count; n++)
		list_del_init(&chunk->owners[n].list);
	spin_unlock(&hash_lock);
	mutex_unlock(&audit_filter_mutex);
	audit_schedule_prune();
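/*
 * The events themselves are of no interest here; chunks only react to
 * their marks being torn down (see audit_tree_freeing_mark() below).
 */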
static int audit_tree_handle_event(struct fsnotify_group *group,
				   struct inode *to_tell,
				   struct fsnotify_mark *inode_mark,
				   struct fsnotify_mark *vfsmount_mark,
				   u32 mask, const void *data, int data_type,
				   const unsigned char *file_name, u32 cookie,
				   struct fsnotify_iter_info *iter_info)
{
	return 0;
}
static void audit_tree_freeing_mark(struct fsnotify_mark *entry, struct fsnotify_group *group)
{
	struct audit_chunk *chunk = container_of(entry, struct audit_chunk, mark);

	evict_chunk(chunk);

	/*
	 * We are guaranteed to have at least one reference to the mark from
	 * either the inode or the caller of fsnotify_destroy_mark().
	 */
	BUG_ON(atomic_read(&entry->refcnt) < 1);
}
static const struct fsnotify_ops audit_tree_ops = {
	.handle_event = audit_tree_handle_event,
	.freeing_mark = audit_tree_freeing_mark,
};
static int __init audit_tree_init(void)
{
	int i;

	audit_tree_group = fsnotify_alloc_group(&audit_tree_ops);
	if (IS_ERR(audit_tree_group))
		audit_panic("cannot initialize fsnotify group for rectree watches");

	for (i = 0; i < HASH_SIZE; i++)
		INIT_LIST_HEAD(&chunk_hash_heads[i]);

	return 0;
}
__initcall(audit_tree_init);