Merge remote-tracking branch 'nvdimm/libnvdimm-for-next'
author: Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 5 Nov 2015 04:33:45 +0000 (15:33 +1100)
committer: Stephen Rothwell <sfr@canb.auug.org.au>
Thu, 5 Nov 2015 04:33:45 +0000 (15:33 +1100)
1  2 
arch/x86/mm/init.c
arch/x86/mm/init_64.c
kernel/memremap.c

diff --combined arch/x86/mm/init.c
@@@ -354,7 -354,7 +354,7 @@@ static int __meminit split_mem_range(st
        }
  
        for (i = 0; i < nr_range; i++)
-               printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
+               pr_debug(" [mem %#010lx-%#010lx] page %s\n",
                                mr[i].start, mr[i].end - 1,
                                page_size_string(&mr[i]));
  
@@@ -401,7 -401,7 +401,7 @@@ unsigned long __init_refok init_memory_
        unsigned long ret = 0;
        int nr_range, i;
  
-       pr_info("init_memory_mapping: [mem %#010lx-%#010lx]\n",
+       pr_debug("init_memory_mapping: [mem %#010lx-%#010lx]\n",
               start, end - 1);
  
        memset(mr, 0, sizeof(mr));
@@@ -693,12 -693,14 +693,12 @@@ void free_initmem(void
  #ifdef CONFIG_BLK_DEV_INITRD
  void __init free_initrd_mem(unsigned long start, unsigned long end)
  {
 -#ifdef CONFIG_MICROCODE_EARLY
        /*
         * Remember, initrd memory may contain microcode or other useful things.
         * Before we lose initrd mem, we need to find a place to hold them
         * now that normal virtual memory is enabled.
         */
        save_microcode_in_initrd();
 -#endif
  
        /*
         * end could be not aligned, and We can not align that,
diff --combined arch/x86/mm/init_64.c
@@@ -1150,8 -1150,6 +1150,8 @@@ void mark_rodata_ro(void
        free_init_pages("unused kernel",
                        (unsigned long) __va(__pa_symbol(rodata_end)),
                        (unsigned long) __va(__pa_symbol(_sdata)));
 +
 +      debug_checkwx();
  }
  
  #endif
@@@ -1270,7 -1268,7 +1270,7 @@@ static int __meminit vmemmap_populate_h
                                /* check to see if we have contiguous blocks */
                                if (p_end != p || node_start != node) {
                                        if (p_start)
-                                               printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+                                               pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                                       addr_start, addr_end-1, p_start, p_end-1, node_start);
                                        addr_start = addr;
                                        node_start = node;
@@@ -1368,7 -1366,7 +1368,7 @@@ void register_page_bootmem_memmap(unsig
  void __meminit vmemmap_populate_print_last(void)
  {
        if (p_start) {
-               printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
+               pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                        addr_start, addr_end-1, p_start, p_end-1, node_start);
                p_start = NULL;
                p_end = NULL;
diff --combined kernel/memremap.c
@@@ -24,16 -24,6 +24,16 @@@ __weak void __iomem *ioremap_cache(reso
  }
  #endif
  
 +static void *try_ram_remap(resource_size_t offset, size_t size)
 +{
 +      struct page *page = pfn_to_page(offset >> PAGE_SHIFT);
 +
 +      /* In the simple case just return the existing linear address */
 +      if (!PageHighMem(page))
 +              return __va(offset);
 +      return NULL; /* fallback to ioremap_cache */
 +}
 +
  /**
   * memremap() - remap an iomem_resource as cacheable memory
   * @offset: iomem resource start address
@@@ -76,8 -66,8 +76,8 @@@ void *memremap(resource_size_t offset, 
                 * the requested range is potentially in "System RAM"
                 */
                if (is_ram == REGION_INTERSECTS)
 -                      addr = __va(offset);
 -              else
 +                      addr = try_ram_remap(offset, size);
 +              if (!addr)
                        addr = ioremap_cache(offset, size);
        }
  
@@@ -124,9 -114,10 +124,10 @@@ void *devm_memremap(struct device *dev
  {
        void **ptr, *addr;
  
-       ptr = devres_alloc(devm_memremap_release, sizeof(*ptr), GFP_KERNEL);
+       ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
+                       dev_to_node(dev));
        if (!ptr)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
  
        addr = memremap(offset, size, flags);
        if (addr) {
@@@ -141,9 -132,8 +142,8 @@@ EXPORT_SYMBOL(devm_memremap)
  
  void devm_memunmap(struct device *dev, void *addr)
  {
-       WARN_ON(devres_destroy(dev, devm_memremap_release, devm_memremap_match,
-                              addr));
-       memunmap(addr);
+       WARN_ON(devres_release(dev, devm_memremap_release,
+                               devm_memremap_match, addr));
  }
  EXPORT_SYMBOL(devm_memunmap);
  
@@@ -176,8 -166,8 +176,8 @@@ void *devm_memremap_pages(struct devic
        if (is_ram == REGION_INTERSECTS)
                return __va(res->start);
  
-       page_map = devres_alloc(devm_memremap_pages_release,
-                       sizeof(*page_map), GFP_KERNEL);
+       page_map = devres_alloc_node(devm_memremap_pages_release,
+                       sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
        if (!page_map)
                return ERR_PTR(-ENOMEM);
  
  
        nid = dev_to_node(dev);
        if (nid < 0)
-               nid = 0;
+               nid = numa_mem_id();
  
        error = arch_add_memory(nid, res->start, resource_size(res), true);
        if (error) {