Merge branch 'for-3.7' of git://linux-nfs.org/~bfields/linux
diff --git a/mm/interval_tree.c b/mm/interval_tree.c
index 7dc565660e562b8e73acdbbdef7df8a43d0b94d8..4a5822a586e6a57c6bdb19842096011b76b94de5 100644
--- a/mm/interval_tree.c
+++ b/mm/interval_tree.c
@@ -8,40 +8,39 @@
 
 #include <linux/mm.h>
 #include <linux/fs.h>
+#include <linux/rmap.h>
+#include <linux/interval_tree_generic.h>
 
-#define ITSTRUCT   struct vm_area_struct
-#define ITRB       shared.linear.rb
-#define ITTYPE     unsigned long
-#define ITSUBTREE  shared.linear.rb_subtree_last
-#define ITSTART(n) ((n)->vm_pgoff)
-#define ITLAST(n)  ((n)->vm_pgoff + \
-                   (((n)->vm_end - (n)->vm_start) >> PAGE_SHIFT) - 1)
-#define ITSTATIC
-#define ITPREFIX   vma_interval_tree
+static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
+{
+       return v->vm_pgoff;
+}
+
+static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
+{
+       return v->vm_pgoff + ((v->vm_end - v->vm_start) >> PAGE_SHIFT) - 1;
+}
 
-#include <linux/interval_tree_tmpl.h>
+INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.linear.rb,
+                    unsigned long, shared.linear.rb_subtree_last,
+                    vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)
 
-/* Insert old immediately after vma in the interval tree */
-void vma_interval_tree_add(struct vm_area_struct *vma,
-                          struct vm_area_struct *old,
-                          struct address_space *mapping)
+/* Insert node immediately after prev in the interval tree */
+void vma_interval_tree_insert_after(struct vm_area_struct *node,
+                                   struct vm_area_struct *prev,
+                                   struct rb_root *root)
 {
        struct rb_node **link;
        struct vm_area_struct *parent;
-       unsigned long last;
-
-       if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
-               list_add(&vma->shared.nonlinear, &old->shared.nonlinear);
-               return;
-       }
+       unsigned long last = vma_last_pgoff(node);
 
-       last = ITLAST(vma);
+       VM_BUG_ON(vma_start_pgoff(node) != vma_start_pgoff(prev));
 
-       if (!old->shared.linear.rb.rb_right) {
-               parent = old;
-               link = &old->shared.linear.rb.rb_right;
+       if (!prev->shared.linear.rb.rb_right) {
+               parent = prev;
+               link = &prev->shared.linear.rb.rb_right;
        } else {
-               parent = rb_entry(old->shared.linear.rb.rb_right,
+               parent = rb_entry(prev->shared.linear.rb.rb_right,
                                  struct vm_area_struct, shared.linear.rb);
                if (parent->shared.linear.rb_subtree_last < last)
                        parent->shared.linear.rb_subtree_last = last;
@@ -54,8 +53,60 @@ void vma_interval_tree_add(struct vm_area_struct *vma,
                link = &parent->shared.linear.rb.rb_left;
        }
 
-       vma->shared.linear.rb_subtree_last = last;
-       rb_link_node(&vma->shared.linear.rb, &parent->shared.linear.rb, link);
-       rb_insert_augmented(&vma->shared.linear.rb, &mapping->i_mmap,
-                           &vma_interval_tree_augment_callbacks);
+       node->shared.linear.rb_subtree_last = last;
+       rb_link_node(&node->shared.linear.rb, &parent->shared.linear.rb, link);
+       rb_insert_augmented(&node->shared.linear.rb, root,
+                           &vma_interval_tree_augment);
+}
+
+static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
+{
+       return vma_start_pgoff(avc->vma);
+}
+
+static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
+{
+       return vma_last_pgoff(avc->vma);
+}
+
+INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
+                    avc_start_pgoff, avc_last_pgoff,
+                    static inline, __anon_vma_interval_tree)
+
+void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
+                                  struct rb_root *root)
+{
+#ifdef CONFIG_DEBUG_VM_RB
+       node->cached_vma_start = avc_start_pgoff(node);
+       node->cached_vma_last = avc_last_pgoff(node);
+#endif
+       __anon_vma_interval_tree_insert(node, root);
+}
+
+void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
+                                  struct rb_root *root)
+{
+       __anon_vma_interval_tree_remove(node, root);
+}
+
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_first(struct rb_root *root,
+                                 unsigned long first, unsigned long last)
+{
+       return __anon_vma_interval_tree_iter_first(root, first, last);
+}
+
+struct anon_vma_chain *
+anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
+                                unsigned long first, unsigned long last)
+{
+       return __anon_vma_interval_tree_iter_next(node, first, last);
+}
+
+#ifdef CONFIG_DEBUG_VM_RB
+void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
+{
+       WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
+       WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
 }
+#endif
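
Usage sketch, not part of the diff above: the INTERVAL_TREE_DEFINE() instance for
struct vm_area_struct emits vma_interval_tree_{insert,remove,iter_first,iter_next}(),
and callers normally reach the iterators through the vma_interval_tree_foreach()
wrapper in include/linux/mm.h. The function below is a hypothetical illustration of
that pattern for a kernel of this vintage; walk_overlapping_vmas() is not a real
kernel symbol, and the i_mmap_mutex locking shown is only an assumption about what
a caller would hold while walking mapping->i_mmap.

        #include <linux/mm.h>
        #include <linux/fs.h>

        /*
         * Hypothetical caller, for illustration only: walk every VMA in a file
         * mapping that overlaps the page-offset range [first, last].  Assumes
         * the caller may take mapping->i_mmap_mutex to serialize against
         * concurrent tree updates.
         */
        static void walk_overlapping_vmas(struct address_space *mapping,
                                          pgoff_t first, pgoff_t last)
        {
                struct vm_area_struct *vma;

                mutex_lock(&mapping->i_mmap_mutex);
                vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last) {
                        /* vma maps at least one page index in [first, last] */
                        pr_debug("overlap: pgoff %lu-%lu\n", vma->vm_pgoff,
                                 vma->vm_pgoff +
                                 ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1);
                }
                mutex_unlock(&mapping->i_mmap_mutex);
        }

The anon_vma side added by this diff follows the same shape through
anon_vma_interval_tree_foreach() in include/linux/rmap.h, iterating
struct anon_vma_chain nodes keyed by the pgoff range of avc->vma.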