ARM: Fix ioremap() of address zero
author Russell King <rmk+kernel@arm.linux.org.uk>
Sat, 25 Aug 2012 08:03:15 +0000 (09:03 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Tue, 2 Oct 2012 17:29:52 +0000 (10:29 -0700)
commit a849088aa1552b1a28eea3daff599ee22a734ae3 upstream.

Murali Nalajala reports a regression that ioremapping address zero
results in an oops dump:

Unable to handle kernel paging request at virtual address fa200000
pgd = d4f80000
[fa200000] *pgd=00000000
Internal error: Oops: 5 [#1] PREEMPT SMP ARM
Modules linked in:
CPU: 0    Tainted: G        W (3.4.0-g3b5f728-00009-g638207a #13)
PC is at msm_pm_config_rst_vector_before_pc+0x8/0x30
LR is at msm_pm_boot_config_before_pc+0x18/0x20
pc : [<c0078f84>]    lr : [<c007903c>]    psr: a0000093
sp : c0837ef0  ip : cfe00000  fp : 0000000d
r10: da7efc17  r9 : 225c4278  r8 : 00000006
r7 : 0003c000  r6 : c085c824  r5 : 00000001  r4 : fa101000
r3 : fa200000  r2 : c095080c  r1 : 002250fc  r0 : 00000000
Flags: NzCv  IRQs off  FIQs on  Mode SVC_32  ISA ARM Segment kernel
Control: 10c5387d  Table: 25180059  DAC: 00000015
[<c0078f84>] (msm_pm_config_rst_vector_before_pc+0x8/0x30) from [<c007903c>] (msm_pm_boot_config_before_pc+0x18/0x20)
[<c007903c>] (msm_pm_boot_config_before_pc+0x18/0x20) from [<c007a55c>] (msm_pm_power_collapse+0x410/0xb04)
[<c007a55c>] (msm_pm_power_collapse+0x410/0xb04) from [<c007b17c>] (arch_idle+0x294/0x3e0)
[<c007b17c>] (arch_idle+0x294/0x3e0) from [<c000eed8>] (default_idle+0x18/0x2c)
[<c000eed8>] (default_idle+0x18/0x2c) from [<c000f254>] (cpu_idle+0x90/0xe4)
[<c000f254>] (cpu_idle+0x90/0xe4) from [<c057231c>] (rest_init+0x88/0xa0)
[<c057231c>] (rest_init+0x88/0xa0) from [<c07ff890>] (start_kernel+0x3a8/0x40c)
Code: c0704256 e12fff1e e59f2020 e5923000 (e5930000)

This is caused by the 'reserved' vm entries we insert to plug empty
PMD section gaps (see commit 19b52abe3c5d7, "ARM: 7438/1: fill possible
PMD empty section gaps").  These entries never have a physical address
assigned, so the static-mapping lookup in ioremap() matches them when
physical address zero is requested.

Resolve this by marking these reserved entries with a different flag.

Tested-by: Murali Nalajala <mnalajal@codeaurora.org>
Acked-by: Nicolas Pitre <nico@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
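
To make the failure mode concrete, here is a small stand-alone model of
the static-mapping reuse scan that ARM's ioremap() performs.  This is a
sketch only: the real lookup lives in arch/arm/mm/ioremap.c, the
function name find_static_mapping() is invented for illustration, and
the flag values are copied from the hunks below; everything else is
simplified user-space code.

/*
 * Stand-alone illustration (not kernel code): a gap entry created by
 * pmd_empty_section_gap() never has phys_addr set, so it stays 0 and
 * is falsely matched when physical address zero is ioremapped.  Giving
 * the gap a flag the lookup ignores avoids the false match.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define VM_ARM_STATIC_MAPPING   0x40000000UL    /* as in arch/arm/mm/mm.h */
#define VM_ARM_EMPTY_MAPPING    0x20000000UL    /* new flag from this patch */
#define SECTION_SIZE            0x00100000UL    /* one 1MiB section */

struct vm_struct {
        struct vm_struct *next;
        void *addr;                     /* virtual address of the area */
        unsigned long size;
        unsigned long flags;
        unsigned long phys_addr;        /* never set for gap entries */
};

/* simplified "reuse a static mapping" scan, modelled on ioremap() */
static struct vm_struct *find_static_mapping(struct vm_struct *vmlist,
                                             unsigned long paddr, size_t size)
{
        struct vm_struct *vm;

        for (vm = vmlist; vm; vm = vm->next) {
                if (!(vm->flags & VM_ARM_STATIC_MAPPING))
                        continue;
                if (paddr < vm->phys_addr ||
                    paddr + size - 1 > vm->phys_addr + vm->size - 1)
                        continue;
                return vm;              /* caller would reuse vm->addr */
        }
        return NULL;
}

int main(void)
{
        /* a section gap entry as built by pmd_empty_section_gap() */
        struct vm_struct gap = {
                .addr  = (void *)(uintptr_t)0xfa200000UL, /* backs no mapping */
                .size  = SECTION_SIZE,
                .flags = VM_ARM_STATIC_MAPPING, /* pre-patch flag */
                /* .phys_addr is never assigned, so it stays 0 */
        };

        /* an ioremap() of physical address 0 wrongly hits the gap entry */
        printf("old flag: %s\n",
               find_static_mapping(&gap, 0, 0x1000) ? "gap matched (bug)"
                                                    : "no match");

        /* with the new flag the lookup skips the gap entirely */
        gap.flags = VM_ARM_EMPTY_MAPPING;
        printf("new flag: %s\n",
               find_static_mapping(&gap, 0, 0x1000) ? "gap matched (bug)"
                                                    : "no match");
        return 0;
}
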
arch/arm/mm/mm.h
arch/arm/mm/mmu.c

diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 27f4a619b35d1c0039d7883c7804994c0540cdc6..d3f25c9aaf82aa5935e9206fbac0680de48e7ac4 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* permanent static mappings from iotable_init() */
 #define VM_ARM_STATIC_MAPPING  0x40000000
 
+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING   0x20000000
+
 /* mapping type (attributes) for permanent static mappings */
 #define VM_ARM_MTYPE(mt)               ((mt) << 20)
 #define VM_ARM_MTYPE_MASK      (0x1f << 20)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 75f9f9d67097fecddd21c0d0090c727e209f08ee..7d419193d49a14325ffe17ddb7c2da2b4cede267 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -805,7 +805,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
        vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
        vm->addr = (void *)addr;
        vm->size = SECTION_SIZE;
-       vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+       vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
        vm->caller = pmd_empty_section_gap;
        vm_area_add_early(vm);
 }
@@ -818,7 +818,7 @@ static void __init fill_pmd_gaps(void)
 
        /* we're still single threaded hence no lock needed here */
        for (vm = vmlist; vm; vm = vm->next) {
-               if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+               if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
                        continue;
                addr = (unsigned long)vm->addr;
                if (addr < next)