#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/ctype.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>

#include <asm/sections.h>
#include <asm/uaccess.h>

#include "internal.h"
static inline int is_kernel_rodata(unsigned long addr)
{
	return addr >= (unsigned long)__start_rodata &&
		addr < (unsigned long)__end_rodata;
}
/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * Calls kfree() only if @x is not in the .rodata section.
 */
void kfree_const(const void *x)
{
	if (!is_kernel_rodata((unsigned long)x))
		kfree(x);
}
EXPORT_SYMBOL(kfree_const);
/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc_track_caller(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
EXPORT_SYMBOL(kstrdup);
/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section, otherwise
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const should be freed by kfree_const.
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
	if (is_kernel_rodata((unsigned long)s))
		return s;

	return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
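
/*
 * Usage sketch (illustrative only, not part of this file; the foo_* names
 * are hypothetical): callers that usually pass string literals can let
 * kstrdup_const() skip the copy, provided the matching free goes through
 * kfree_const().
 *
 *	struct foo {
 *		const char *name;
 *	};
 *
 *	static int foo_set_name(struct foo *f, const char *name)
 *	{
 *		f->name = kstrdup_const(name, GFP_KERNEL);
 *		return f->name ? 0 : -ENOMEM;
 *	}
 *
 *	static void foo_free(struct foo *f)
 *	{
 *		kfree_const(f->name);
 *	}
 *
 * kfree_const() is a no-op when the name still points into .rodata.
 */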
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strnlen(s, max);
	buf = kmalloc_track_caller(len+1, gfp);
	if (buf) {
		memcpy(buf, s, len);
		buf[len] = '\0';
	}
	return buf;
}
EXPORT_SYMBOL(kstrndup);
/**
 * kstrimdup - Trim and copy a %NUL terminated string.
 * @s: the string to trim and duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns an address, which the caller must kfree, containing
 * a duplicate of the passed string with leading and/or trailing
 * whitespace (as defined by isspace) removed.
 */
char *kstrimdup(const char *s, gfp_t gfp)
{
	char *buf;
	char *begin = skip_spaces(s);
	size_t len = strlen(begin);

	while (len && isspace(begin[len - 1]))
		len--;

	buf = kmalloc_track_caller(len + 1, gfp);
	if (!buf)
		return NULL;

	memcpy(buf, begin, len);
	buf[len] = '\0';

	return buf;
}
EXPORT_SYMBOL(kstrimdup);
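
/*
 * Usage sketch (illustrative; foo_store() and foo_name are hypothetical):
 * trimming surrounding whitespace from a sysfs write before keeping it.
 *
 *	static ssize_t foo_store(struct kobject *kobj,
 *				 struct kobj_attribute *attr,
 *				 const char *buf, size_t count)
 *	{
 *		char *name = kstrimdup(buf, GFP_KERNEL);
 *
 *		if (!name)
 *			return -ENOMEM;
 *		kfree(foo_name);
 *		foo_name = name;
 *		return count;
 *	}
 */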
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
	void *p;

	p = kmalloc_track_caller(len, gfp);
	if (p)
		memcpy(p, src, len);
	return p;
}
EXPORT_SYMBOL(kmemdup);
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user(const void __user *src, size_t len)
{
	void *p;

	/*
	 * Always use GFP_KERNEL, since copy_from_user() can sleep and
	 * cause pagefault, which makes it pointless to use GFP_NOFS
	 * or GFP_ATOMIC.
	 */
	p = kmalloc_track_caller(len, GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(p, src, len)) {
		kfree(p);
		return ERR_PTR(-EFAULT);
	}

	return p;
}
EXPORT_SYMBOL(memdup_user);
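
/*
 * Usage sketch (hypothetical ioctl-style handler, for illustration only):
 * copy a user buffer, check the ERR_PTR() convention, and kfree() the
 * result when done.
 *
 *	static long foo_set_blob(struct foo_ctx *ctx,
 *				 const void __user *uarg, size_t len)
 *	{
 *		void *buf = memdup_user(uarg, len);
 *
 *		if (IS_ERR(buf))
 *			return PTR_ERR(buf);
 *		foo_apply(ctx, buf, len);
 *		kfree(buf);
 *		return 0;
 *	}
 */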
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
	char *p;
	long length;

	length = strnlen_user(s, n);

	if (!length)
		return ERR_PTR(-EFAULT);

	if (length > n)
		return ERR_PTR(-EINVAL);

	p = memdup_user(s, length);

	if (IS_ERR(p))
		return p;

	p[length - 1] = '\0';

	return p;
}
EXPORT_SYMBOL(strndup_user);
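
/*
 * Usage sketch (illustrative; foo_set_label() is hypothetical): copy a
 * bounded, NUL-terminated string from user space. On success the result
 * must be released with kfree().
 *
 *	static int foo_set_label(const char __user *ulabel)
 *	{
 *		char *label = strndup_user(ulabel, PAGE_SIZE);
 *
 *		if (IS_ERR(label))
 *			return PTR_ERR(label);
 *		pr_info("new label: %s\n", label);
 *		kfree(label);
 *		return 0;
 *	}
 */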
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct rb_node *rb_parent)
{
	struct vm_area_struct *next;

	vma->vm_prev = prev;
	if (prev) {
		next = prev->vm_next;
		prev->vm_next = vma;
	} else {
		mm->mmap = vma;
		if (rb_parent)
			next = rb_entry(rb_parent,
					struct vm_area_struct, vm_rb);
		else
			next = NULL;
	}
	vma->vm_next = next;
	if (next)
		next->vm_prev = vma;
}
/* Check if the vma is being used as a stack by this task */
static int vm_is_stack_for_task(struct task_struct *t,
				struct vm_area_struct *vma)
{
	return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}

/*
 * Check if the vma is being used as a stack.
 * If in_group is true, check in the entire thread group, or else
 * just check in the current task. Returns the task_struct of the task
 * that the vma is stack for. Must be called under rcu_read_lock().
 */
struct task_struct *task_of_stack(struct task_struct *task,
				struct vm_area_struct *vma, bool in_group)
{
	if (vm_is_stack_for_task(task, vma))
		return task;

	if (in_group) {
		struct task_struct *t;

		for_each_thread(task, t) {
			if (vm_is_stack_for_task(t, vma))
				return t;
		}
	}

	return NULL;
}
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;
	mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif
/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
				 int nr_pages, int write, struct page **pages)
{
	return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However,
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However, if the
 * pages have to be faulted in, it may turn out to be slightly slower, so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
				int nr_pages, int write, struct page **pages)
{
	struct mm_struct *mm = current->mm;

	return get_user_pages_unlocked(current, mm, start, nr_pages,
				       write, 0, pages);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
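
/*
 * Caller sketch (illustrative only, not part of this file), using this
 * era's (start, nr_pages, write, pages) signature. Fewer pages than
 * requested may be pinned, and every pinned page must be released:
 *
 *	struct page *pages[16];
 *	int i, got;
 *
 *	got = get_user_pages_fast(uaddr & PAGE_MASK, 16, 1, pages);
 *	if (got < 0)
 *		return got;
 *	... operate on the got pinned pages ...
 *	for (i = 0; i < got; i++)
 *		put_page(pages[i]);
 */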
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;
	unsigned long populate;

	ret = security_mmap_file(file, prot, flag);
	if (!ret) {
		down_write(&mm->mmap_sem);
		ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
				    &populate);
		up_write(&mm->mmap_sem);
		if (populate)
			mm_populate(ret, populate);
	}
	return ret;
}
unsigned long vm_mmap(struct file *file, unsigned long addr,
	unsigned long len, unsigned long prot,
	unsigned long flag, unsigned long offset)
{
	if (unlikely(offset + PAGE_ALIGN(len) < offset))
		return -EINVAL;
	if (unlikely(offset_in_page(offset)))
		return -EINVAL;

	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
EXPORT_SYMBOL(kvfree);
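
/*
 * kvfree() pairs with the common "try kmalloc, fall back to vmalloc"
 * pattern, so the caller need not remember which allocator succeeded.
 * Sketch (foo_alloc_table() is hypothetical):
 *
 *	static void *foo_alloc_table(size_t size)
 *	{
 *		void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
 *
 *		return p ? p : vmalloc(size);
 *	}
 *
 * Either result is correctly freed with kvfree().
 */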
static inline void *__page_rmapping(struct page *page)
{
	unsigned long mapping;

	mapping = (unsigned long)page->mapping;
	mapping &= ~PAGE_MAPPING_FLAGS;

	return (void *)mapping;
}
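
/*
 * Illustrative note (not from this file): page->mapping is a tagged
 * pointer. For an anon page, rmap stores the anon_vma with the
 * PAGE_MAPPING_ANON bit set, roughly:
 *
 *	page->mapping = (void *)((unsigned long)anon_vma | PAGE_MAPPING_ANON);
 *
 * so __page_rmapping() masks PAGE_MAPPING_FLAGS back off to recover the
 * bare anon_vma (or address_space) pointer.
 */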
/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
	page = compound_head(page);
	return __page_rmapping(page);
}
struct anon_vma *page_anon_vma(struct page *page)
{
	unsigned long mapping;

	page = compound_head(page);
	mapping = (unsigned long)page->mapping;
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		return NULL;
	return __page_rmapping(page);
}
struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping;

	page = compound_head(page);

	/* This happens if someone calls flush_dcache_page on a slab page */
	if (unlikely(PageSlab(page)))
		return NULL;

	if (unlikely(PageSwapCache(page))) {
		swp_entry_t entry;

		entry.val = page_private(page);
		return swap_address_space(entry);
	}

	mapping = page->mapping;
	if ((unsigned long)mapping & PAGE_MAPPING_FLAGS)
		return NULL;
	return mapping;
}
int overcommit_ratio_handler(struct ctl_table *table, int write,
			     void __user *buffer, size_t *lenp,
			     loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_kbytes = 0;
	return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
			      void __user *buffer, size_t *lenp,
			      loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		sysctl_overcommit_ratio = 0;
	return ret;
}
/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
	unsigned long allowed;

	if (sysctl_overcommit_kbytes)
		allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
	else
		allowed = ((totalram_pages - hugetlb_total_pages())
			   * sysctl_overcommit_ratio / 100);
	allowed += total_swap_pages;

	return allowed;
}
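
/*
 * Worked example (illustrative, assuming 4 KiB pages): with
 * sysctl_overcommit_ratio = 50, 2097152 pages of RAM (8 GiB), no hugetlb
 * pages and 524288 pages of swap (2 GiB), the OVERCOMMIT_NEVER limit is
 *
 *	2097152 * 50 / 100 + 524288 = 1572864 pages (6 GiB)
 *
 * If sysctl_overcommit_kbytes = 1048576 (1 GiB) is set instead, the ratio
 * is ignored and the base becomes 1048576 >> (12 - 10) = 262144 pages
 * (1 GiB), again plus swap.
 */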
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee an ending NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
	int res = 0;
	unsigned int len;
	struct mm_struct *mm = get_task_mm(task);

	if (!mm)
		goto out;
	if (!mm->arg_end)
		goto out_mm;	/* Shh! No looking before we're done */

	len = mm->arg_end - mm->arg_start;

	if (len > buflen)
		len = buflen;

	res = access_process_vm(task, mm->arg_start, buffer, len, 0);

	/*
	 * If the NUL at the end of args has been overwritten, then
	 * assume application is using setproctitle(3).
	 */
	if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
		len = strnlen(buffer, res);
		if (len < res) {
			res = len;
		} else {
			len = mm->env_end - mm->env_start;
			if (len > buflen - res)
				len = buflen - res;
			res += access_process_vm(task, mm->env_start,
						 buffer+res, len, 0);
			res = strnlen(buffer, res);
		}
	}
out_mm:
	mmput(mm);
out:
	return res;
}