Merge branch 'x86-pat-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 18 May 2010 16:28:04 +0000 (09:28 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 18 May 2010 16:28:04 +0000 (09:28 -0700)
* 'x86-pat-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86, pat: Update the page flags for memtype atomically instead of using memtype_lock
  x86, pat: In rbt_memtype_check_insert(), update new->type only if valid
  x86, pat: Migrate to rbtree only backend for pat memtype management
  x86, pat: Preparatory changes in pat.c for bigger rbtree change
  rbtree: Add support for augmented rbtrees

arch/x86/mm/pat.c
include/linux/rbtree.h

diff --combined arch/x86/mm/pat.c
index edc8b95afc1a345f97c4abe6daf9da628b831d2d,501fc60e5e4d76c341e01b53a30083caeed56bf4..bbe5502ee1cbdaa7eaf5d9de15454b143b67b626
@@@ -12,7 -12,7 +12,7 @@@
  #include <linux/debugfs.h>
  #include <linux/kernel.h>
  #include <linux/module.h>
 -#include <linux/gfp.h>
 +#include <linux/slab.h>
  #include <linux/mm.h>
  #include <linux/fs.h>
  #include <linux/rbtree.h>
@@@ -30,6 -30,8 +30,8 @@@
  #include <asm/pat.h>
  #include <asm/io.h>
  
+ #include "pat_internal.h"
  #ifdef CONFIG_X86_PAT
  int __read_mostly pat_enabled = 1;
  
@@@ -53,19 -55,15 +55,15 @@@ static inline void pat_disable(const char *reason)
  #endif
  
  
- static int debug_enable;
+ int pat_debug_enable;
  
  static int __init pat_debug_setup(char *str)
  {
-       debug_enable = 1;
+       pat_debug_enable = 1;
        return 0;
  }
  __setup("debugpat", pat_debug_setup);
  
- #define dprintk(fmt, arg...) \
-       do { if (debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
  static u64 __read_mostly boot_pat_state;
  
  enum {
@@@ -132,84 -130,7 +130,7 @@@ void pat_init(void)
  
  #undef PAT
  
- static char *cattr_name(unsigned long flags)
- {
-       switch (flags & _PAGE_CACHE_MASK) {
-       case _PAGE_CACHE_UC:            return "uncached";
-       case _PAGE_CACHE_UC_MINUS:      return "uncached-minus";
-       case _PAGE_CACHE_WB:            return "write-back";
-       case _PAGE_CACHE_WC:            return "write-combining";
-       default:                        return "broken";
-       }
- }
- /*
-  * The global memtype list keeps track of memory type for specific
-  * physical memory areas. Conflicting memory types in different
-  * mappings can cause CPU cache corruption. To avoid this we keep track.
-  *
-  * The list is sorted based on starting address and can contain multiple
-  * entries for each address (this allows reference counting for overlapping
-  * areas). All the aliases have the same cache attributes of course.
-  * Zero attributes are represented as holes.
-  *
-  * The data structure is a list that is also organized as an rbtree
-  * sorted on the start address of memtype range.
-  *
-  * memtype_lock protects both the linear list and rbtree.
-  */
- struct memtype {
-       u64                     start;
-       u64                     end;
-       unsigned long           type;
-       struct list_head        nd;
-       struct rb_node          rb;
- };
- static struct rb_root memtype_rbroot = RB_ROOT;
- static LIST_HEAD(memtype_list);
- static DEFINE_SPINLOCK(memtype_lock); /* protects memtype list */
- static struct memtype *memtype_rb_search(struct rb_root *root, u64 start)
- {
-       struct rb_node *node = root->rb_node;
-       struct memtype *last_lower = NULL;
-       while (node) {
-               struct memtype *data = container_of(node, struct memtype, rb);
-               if (data->start < start) {
-                       last_lower = data;
-                       node = node->rb_right;
-               } else if (data->start > start) {
-                       node = node->rb_left;
-               } else
-                       return data;
-       }
-       /* Will return NULL if there is no entry with its start <= start */
-       return last_lower;
- }
- static void memtype_rb_insert(struct rb_root *root, struct memtype *data)
- {
-       struct rb_node **new = &(root->rb_node);
-       struct rb_node *parent = NULL;
-       while (*new) {
-               struct memtype *this = container_of(*new, struct memtype, rb);
-               parent = *new;
-               if (data->start <= this->start)
-                       new = &((*new)->rb_left);
-               else if (data->start > this->start)
-                       new = &((*new)->rb_right);
-       }
-       rb_link_node(&data->rb, parent, new);
-       rb_insert_color(&data->rb, root);
- }
+ static DEFINE_SPINLOCK(memtype_lock); /* protects memtype accesses */
  
  /*
   * Does intersection of PAT memory type and MTRR memory type and returns
@@@ -237,33 -158,6 +158,6 @@@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
        return req_type;
  }
  
- static int
- chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
- {
-       if (new->type != entry->type) {
-               if (type) {
-                       new->type = entry->type;
-                       *type = entry->type;
-               } else
-                       goto conflict;
-       }
-        /* check overlaps with more than one entry in the list */
-       list_for_each_entry_continue(entry, &memtype_list, nd) {
-               if (new->end <= entry->start)
-                       break;
-               else if (new->type != entry->type)
-                       goto conflict;
-       }
-       return 0;
-  conflict:
-       printk(KERN_INFO "%s:%d conflicting memory types "
-              "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
-              new->end, cattr_name(new->type), cattr_name(entry->type));
-       return -EBUSY;
- }
  static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
  {
        int ram_page = 0, not_rampage = 0;
   * Here we do two pass:
   * - Find the memtype of all the pages in the range, look for any conflicts
   * - In case of no conflicts, set the new memtype for pages in the range
-  *
-  * Caller must hold memtype_lock for atomicity.
   */
  static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
                                  unsigned long *new_type)
@@@ -364,9 -256,8 +256,8 @@@ static int free_ram_pages_type(u64 start, u64 end)
  int reserve_memtype(u64 start, u64 end, unsigned long req_type,
                    unsigned long *new_type)
  {
-       struct memtype *new, *entry;
+       struct memtype *new;
        unsigned long actual_type;
-       struct list_head *where;
        int is_range_ram;
        int err = 0;
  
        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {
  
-               spin_lock(&memtype_lock);
                err = reserve_ram_pages_type(start, end, req_type, new_type);
-               spin_unlock(&memtype_lock);
  
                return err;
        } else if (is_range_ram < 0) {
  
        spin_lock(&memtype_lock);
  
-       /* Search for existing mapping that overlaps the current range */
-       where = NULL;
-       list_for_each_entry(entry, &memtype_list, nd) {
-               if (end <= entry->start) {
-                       where = entry->nd.prev;
-                       break;
-               } else if (start <= entry->start) { /* end > entry->start */
-                       err = chk_conflict(new, entry, new_type);
-                       if (!err) {
-                               dprintk("Overlap at 0x%Lx-0x%Lx\n",
-                                       entry->start, entry->end);
-                               where = entry->nd.prev;
-                       }
-                       break;
-               } else if (start < entry->end) { /* start > entry->start */
-                       err = chk_conflict(new, entry, new_type);
-                       if (!err) {
-                               dprintk("Overlap at 0x%Lx-0x%Lx\n",
-                                       entry->start, entry->end);
-                               /*
-                                * Move to right position in the linked
-                                * list to add this new entry
-                                */
-                               list_for_each_entry_continue(entry,
-                                                       &memtype_list, nd) {
-                                       if (start <= entry->start) {
-                                               where = entry->nd.prev;
-                                               break;
-                                       }
-                               }
-                       }
-                       break;
-               }
-       }
+       err = rbt_memtype_check_insert(new, new_type);
        if (err) {
                printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, "
                       "track %s, req %s\n",
                return err;
        }
  
-       if (where)
-               list_add(&new->nd, where);
-       else
-               list_add_tail(&new->nd, &memtype_list);
-       memtype_rb_insert(&memtype_rbroot, new);
        spin_unlock(&memtype_lock);
  
        dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
  
  int free_memtype(u64 start, u64 end)
  {
-       struct memtype *entry, *saved_entry;
        int err = -EINVAL;
        int is_range_ram;
  
        is_range_ram = pat_pagerange_is_ram(start, end);
        if (is_range_ram == 1) {
  
-               spin_lock(&memtype_lock);
                err = free_ram_pages_type(start, end);
-               spin_unlock(&memtype_lock);
  
                return err;
        } else if (is_range_ram < 0) {
        }
  
        spin_lock(&memtype_lock);
-       entry = memtype_rb_search(&memtype_rbroot, start);
-       if (unlikely(entry == NULL))
-               goto unlock_ret;
-       /*
-        * Saved entry points to an entry with start same or less than what
-        * we searched for. Now go through the list in both directions to look
-        * for the entry that matches with both start and end, with list stored
-        * in sorted start address
-        */
-       saved_entry = entry;
-       list_for_each_entry_from(entry, &memtype_list, nd) {
-               if (entry->start == start && entry->end == end) {
-                       rb_erase(&entry->rb, &memtype_rbroot);
-                       list_del(&entry->nd);
-                       kfree(entry);
-                       err = 0;
-                       break;
-               } else if (entry->start > start) {
-                       break;
-               }
-       }
-       if (!err)
-               goto unlock_ret;
-       entry = saved_entry;
-       list_for_each_entry_reverse(entry, &memtype_list, nd) {
-               if (entry->start == start && entry->end == end) {
-                       rb_erase(&entry->rb, &memtype_rbroot);
-                       list_del(&entry->nd);
-                       kfree(entry);
-                       err = 0;
-                       break;
-               } else if (entry->start < start) {
-                       break;
-               }
-       }
- unlock_ret:
+       err = rbt_memtype_erase(start, end);
        spin_unlock(&memtype_lock);
  
        if (err) {
@@@ -583,10 -388,8 +388,8 @@@ static unsigned long lookup_memtype(u64 paddr)
  
        if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
                struct page *page;
-               spin_lock(&memtype_lock);
                page = pfn_to_page(paddr >> PAGE_SHIFT);
                rettype = get_page_memtype(page);
-               spin_unlock(&memtype_lock);
                /*
                 * -1 from get_page_memtype() implies RAM page is in its
                 * default state and not reserved, and hence of type WB
  
        spin_lock(&memtype_lock);
  
-       entry = memtype_rb_search(&memtype_rbroot, paddr);
+       entry = rbt_memtype_lookup(paddr);
        if (entry != NULL)
                rettype = entry->type;
        else
@@@ -936,29 -739,25 +739,25 @@@ EXPORT_SYMBOL_GPL(pgprot_writecombine)
  
  #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
  
- /* get Nth element of the linked list */
  static struct memtype *memtype_get_idx(loff_t pos)
  {
-       struct memtype *list_node, *print_entry;
-       int i = 1;
+       struct memtype *print_entry;
+       int ret;
  
-       print_entry  = kmalloc(sizeof(struct memtype), GFP_KERNEL);
+       print_entry  = kzalloc(sizeof(struct memtype), GFP_KERNEL);
        if (!print_entry)
                return NULL;
  
        spin_lock(&memtype_lock);
-       list_for_each_entry(list_node, &memtype_list, nd) {
-               if (pos == i) {
-                       *print_entry = *list_node;
-                       spin_unlock(&memtype_lock);
-                       return print_entry;
-               }
-               ++i;
-       }
+       ret = rbt_memtype_copy_nth_element(print_entry, pos);
        spin_unlock(&memtype_lock);
-       kfree(print_entry);
  
-       return NULL;
+       if (!ret) {
+               return print_entry;
+       } else {
+               kfree(print_entry);
+               return NULL;
+       }
  }
  
  static void *memtype_seq_start(struct seq_file *seq, loff_t *pos)
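
The net effect of the pat.c side of this merge: the dual bookkeeping
(a sorted linked list plus an rbtree, both guarded by memtype_lock)
collapses into a single rbtree-only backend, reached through the
rbt_memtype_* calls that now appear in the hunks above. As a rough
sketch (not part of the commit; the prototypes are inferred from the
call sites shown here, and the real declarations live in the new
pat_internal.h), every converted site follows the same convention of
"take memtype_lock, call one backend operation, drop the lock":

-----------------------------------------------------------------------
#include <linux/types.h>
#include <linux/spinlock.h>

/* Backend interface as used by the call sites in this diff. */
struct memtype;
int rbt_memtype_check_insert(struct memtype *new, unsigned long *new_type);
int rbt_memtype_erase(u64 start, u64 end);
struct memtype *rbt_memtype_lookup(u64 addr);
int rbt_memtype_copy_nth_element(struct memtype *out, loff_t pos);

static DEFINE_SPINLOCK(memtype_lock);	/* serializes every rbt_* call */

/* Hypothetical caller, mirroring the free_memtype() hunk above. */
static int example_free_range(u64 start, u64 end)
{
	int err;

	spin_lock(&memtype_lock);	/* backend assumes this is held */
	err = rbt_memtype_erase(start, end);
	spin_unlock(&memtype_lock);

	return err;
}
-----------------------------------------------------------------------

Note that the RAM-backed case (reserve_ram_pages_type/free_ram_pages_type)
drops the lock entirely in this diff, since the page-flag update is now
done atomically per page rather than under memtype_lock.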
diff --combined include/linux/rbtree.h
index 5210a5c60877c2b0d08b831d9f2bdcb8ec1b2b3a,8e33a256ea0e17cf872556d6bce0e468764a4ad8..fe1872e5b37e1f8fab4d090f620c2660809d0a6b
  
    Some example of insert and search follows here. The search is a plain
    normal search over an ordered tree. The insert instead must be implemented
 -  int two steps: as first thing the code must insert the element in
 -  order as a red leaf in the tree, then the support library function
 -  rb_insert_color() must be called. Such function will do the
 -  not trivial work to rebalance the rbtree if necessary.
 +  in two steps: First, the code must insert the element in order as a red leaf
 +  in the tree, and then the support library function rb_insert_color() must
 +  be called. Such function will do the not trivial work to rebalance the
 +  rbtree, if necessary.
  
  -----------------------------------------------------------------------
  static inline struct page * rb_search_page_cache(struct inode * inode,
@@@ -110,6 -110,7 +110,7 @@@ struct rb_node
  struct rb_root
  {
        struct rb_node *rb_node;
+       void (*augment_cb)(struct rb_node *node);
  };
  
  
@@@ -129,7 -130,9 +130,9 @@@ static inline void rb_set_color(struct rb_node *rb, int color)
        rb->rb_parent_color = (rb->rb_parent_color & ~1) | color;
  }
  
- #define RB_ROOT       (struct rb_root) { NULL, }
+ #define RB_ROOT       (struct rb_root) { NULL, NULL, }
+ #define RB_AUGMENT_ROOT(x)    (struct rb_root) { NULL, x}
  #define       rb_entry(ptr, type, member) container_of(ptr, type, member)
  
  #define RB_EMPTY_ROOT(root)   ((root)->rb_node == NULL)
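
The rbtree.h hunks above are the "augmented rbtree" support from the
last commit in the shortlog: struct rb_root gains an augment_cb hook
that the rebalancing code invokes on nodes whose subtrees change, so a
user can keep per-node summary data up to date, and RB_AUGMENT_ROOT(x)
declares a root with that callback registered (plain RB_ROOT now simply
passes NULL). A minimal sketch of a client, modeled on what the new pat
memtype rbtree uses this for (all interval_* names here are
hypothetical; only rb_node, rb_entry and RB_AUGMENT_ROOT come from the
diff):

-----------------------------------------------------------------------
#include <linux/types.h>
#include <linux/rbtree.h>

/*
 * Hypothetical augmented node: an interval keyed on 'start', caching
 * the maximum 'end' found anywhere in its subtree -- the same idea the
 * pat memtype code uses to find overlapping ranges quickly.
 */
struct interval_node {
	struct rb_node	rb;
	u64		start;
	u64		end;
	u64		subtree_max_end;	/* augmented data */
};

/* Invoked by the rbtree code on nodes whose subtree may have changed. */
static void interval_augment_cb(struct rb_node *node)
{
	struct interval_node *n = rb_entry(node, struct interval_node, rb);
	u64 max = n->end;

	/* Recompute this node's summary from its children. */
	if (node->rb_left) {
		struct interval_node *l =
			rb_entry(node->rb_left, struct interval_node, rb);
		if (l->subtree_max_end > max)
			max = l->subtree_max_end;
	}
	if (node->rb_right) {
		struct interval_node *r =
			rb_entry(node->rb_right, struct interval_node, rb);
		if (r->subtree_max_end > max)
			max = r->subtree_max_end;
	}
	n->subtree_max_end = max;
}

/* Register the callback when declaring the root. */
static struct rb_root interval_root = RB_AUGMENT_ROOT(&interval_augment_cb);
-----------------------------------------------------------------------

Insertion still follows the two-step scheme described in the comment
above (link the node as a red leaf, then call rb_insert_color()); with a
non-NULL augment_cb, the rebalancing done in that second step also keeps
the cached subtree_max_end values consistent.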