/*
 *  flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/elf-randomize.h>
#include <linux/security.h>
#include <linux/mman.h>

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~128 MB hole on 32bit applications.
 *
 * On 64bit applications we randomise the stack by 1GB so we need to
 * space our mmap start address by a further 1GB, otherwise there is a
 * chance the mmap area will end up closer to the stack than our ulimit
 * requires.
 */
#define MIN_GAP32	(128*1024*1024)
#define MIN_GAP64	((128 + 1024)*1024*1024UL)
#define MIN_GAP		((is_32bit_task()) ? MIN_GAP32 : MIN_GAP64)
#define MAX_GAP		(TASK_SIZE/6*5)
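
/*
 * Worked example (illustrative): MIN_GAP64 is (128 + 1024) MB =
 * 1152 MB, i.e. the basic 128 MB hole plus a further 1 GB to absorb
 * the stack randomisation described above; MAX_GAP caps the gap at
 * 5/6 of TASK_SIZE so an enormous RLIMIT_STACK cannot swallow the
 * whole mmap area.
 */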

static inline int mmap_is_legacy(void)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}
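
/*
 * For example, a task started under "ulimit -s unlimited" hits the
 * RLIM_INFINITY check above and gets the legacy bottom-up layout,
 * since no safe top-down mmap base exists below an unbounded stack.
 */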

unsigned long arch_mmap_rnd(void)
{
	unsigned long shift, rnd;

	shift = mmap_rnd_bits;
#ifdef CONFIG_COMPAT
	if (is_32bit_task())
		shift = mmap_rnd_compat_bits;
#endif
	rnd = get_random_long() % (1ul << shift);

	return rnd << PAGE_SHIFT;
}
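
/*
 * Illustration (assumed values): with shift = 14 and PAGE_SHIFT = 12
 * (4K pages), rnd is one of 2^14 page-aligned offsets, giving up to
 * 2^26 bytes (64 MB) of mmap base randomisation.
 */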

static inline unsigned long mmap_base(unsigned long rnd)
{
	unsigned long gap = rlimit(RLIMIT_STACK);

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(DEFAULT_MAP_WINDOW - gap - rnd);
}
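
/*
 * For instance, with the common 8 MB default RLIMIT_STACK on a
 * 64-bit task, gap is raised to MIN_GAP64 (1152 MB), so the top-down
 * base lands 1152 MB plus the random factor below DEFAULT_MAP_WINDOW.
 */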

#ifdef CONFIG_PPC_RADIX_MMU
/*
 * Same function as the generic code, used only for radix. We don't
 * need to overload the generic one, but we still have to duplicate it
 * here because hash selects HAVE_ARCH_UNMAPPED_AREA.
 */
static unsigned long
radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
			      unsigned long len, unsigned long pgoff,
			      unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;

	if (unlikely(addr > mm->context.addr_limit &&
		     mm->context.addr_limit != TASK_SIZE))
		mm->context.addr_limit = TASK_SIZE;

	if (len > mm->task_size - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.align_mask = 0;
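
	/*
	 * Bottom-up search between mmap_base and DEFAULT_MAP_WINDOW
	 * (128TB in the common 64K-page configuration); only a hint
	 * above the window opens up the enlarged addr_limit range.
	 */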

	if (unlikely(addr > DEFAULT_MAP_WINDOW))
		info.high_limit = mm->context.addr_limit;
	else
		info.high_limit = DEFAULT_MAP_WINDOW;

	return vm_unmapped_area(&info);
}

static unsigned long
radix__arch_get_unmapped_area_topdown(struct file *filp,
				      const unsigned long addr0,
				      const unsigned long len,
				      const unsigned long pgoff,
				      const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	if (unlikely(addr > mm->context.addr_limit &&
		     mm->context.addr_limit != TASK_SIZE))
		mm->context.addr_limit = TASK_SIZE;

	/* requested length too big for entire address space */
	if (len > mm->task_size - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base;
	info.align_mask = 0;
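
	/*
	 * Top-down search just below mmap_base. A hint above
	 * DEFAULT_MAP_WINDOW raises the upper bound by whatever
	 * address space was unlocked beyond the default window.
	 */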

	if (addr > DEFAULT_MAP_WINDOW)
		info.high_limit += mm->context.addr_limit - DEFAULT_MAP_WINDOW;

	addr = vm_unmapped_area(&info);
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return radix__arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}

static void radix__arch_pick_mmap_layout(struct mm_struct *mm,
					 unsigned long random_factor)
{
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = radix__arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = radix__arch_get_unmapped_area_topdown;
	}
}
#else
/* dummy */
extern void radix__arch_pick_mmap_layout(struct mm_struct *mm,
					 unsigned long random_factor);
#endif
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (radix_enabled())
		return radix__arch_pick_mmap_layout(mm, random_factor);
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
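
/*
 * Layout summary (illustrative): the legacy layout allocates
 * bottom-up from TASK_UNMAPPED_BASE towards the stack, while the
 * default layout allocates top-down from mmap_base(), just below
 * the stack gap, leaving room above the heap for brk() growth.
 */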