mm/madvise.c (karo-tx-linux.git, git.kernelconcepts.de)
1 /*
2  *      linux/mm/madvise.c
3  *
4  * Copyright (C) 1999  Linus Torvalds
5  * Copyright (C) 2002  Christoph Hellwig
6  */
7
8 #include <linux/mman.h>
9 #include <linux/pagemap.h>
10 #include <linux/syscalls.h>
11 #include <linux/mempolicy.h>
12 #include <linux/page-isolation.h>
13 #include <linux/hugetlb.h>
14 #include <linux/falloc.h>
15 #include <linux/sched.h>
16 #include <linux/ksm.h>
17 #include <linux/fs.h>
18 #include <linux/file.h>
19 #include <linux/blkdev.h>
20 #include <linux/backing-dev.h>
21 #include <linux/swap.h>
22 #include <linux/swapops.h>
23 #include <linux/mmu_notifier.h>
24
25 #include <asm/tlb.h>
26
27 /*
28  * Any behaviour which results in changes to the vma->vm_flags needs to
29  * take mmap_sem for writing. Others, which simply traverse vmas, need
30  * to only take it for reading.
31  */
32 static int madvise_need_mmap_write(int behavior)
33 {
34         switch (behavior) {
35         case MADV_REMOVE:
36         case MADV_WILLNEED:
37         case MADV_DONTNEED:
38         case MADV_FREE:
39                 return 0;
40         default:
41                 /* be safe, default to 1. list exceptions explicitly */
42                 return 1;
43         }
44 }
45
46 /*
47  * We can potentially split a vm area into separate
48  * areas, each area with its own behavior.
49  */
50 static long madvise_behavior(struct vm_area_struct *vma,
51                      struct vm_area_struct **prev,
52                      unsigned long start, unsigned long end, int behavior)
53 {
54         struct mm_struct *mm = vma->vm_mm;
55         int error = 0;
56         pgoff_t pgoff;
57         unsigned long new_flags = vma->vm_flags;
58
59         switch (behavior) {
60         case MADV_NORMAL:
61                 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
62                 break;
63         case MADV_SEQUENTIAL:
64                 new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
65                 break;
66         case MADV_RANDOM:
67                 new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
68                 break;
69         case MADV_DONTFORK:
70                 new_flags |= VM_DONTCOPY;
71                 break;
72         case MADV_DOFORK:
73                 if (vma->vm_flags & VM_IO) {
74                         error = -EINVAL;
75                         goto out;
76                 }
77                 new_flags &= ~VM_DONTCOPY;
78                 break;
79         case MADV_DONTDUMP:
80                 new_flags |= VM_DONTDUMP;
81                 break;
82         case MADV_DODUMP:
83                 if (new_flags & VM_SPECIAL) {
84                         error = -EINVAL;
85                         goto out;
86                 }
87                 new_flags &= ~VM_DONTDUMP;
88                 break;
89         case MADV_MERGEABLE:
90         case MADV_UNMERGEABLE:
91                 error = ksm_madvise(vma, start, end, behavior, &new_flags);
92                 if (error)
93                         goto out;
94                 break;
95         case MADV_HUGEPAGE:
96         case MADV_NOHUGEPAGE:
97                 error = hugepage_madvise(vma, &new_flags, behavior);
98                 if (error)
99                         goto out;
100                 break;
101         }
102
103         if (new_flags == vma->vm_flags) {
104                 *prev = vma;
105                 goto out;
106         }
107
108         pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
109         *prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
110                           vma->vm_file, pgoff, vma_policy(vma),
111                           vma->vm_userfaultfd_ctx);
112         if (*prev) {
113                 vma = *prev;
114                 goto success;
115         }
116
117         *prev = vma;
118
119         if (start != vma->vm_start) {
120                 error = split_vma(mm, vma, start, 1);
121                 if (error)
122                         goto out;
123         }
124
125         if (end != vma->vm_end) {
126                 error = split_vma(mm, vma, end, 0);
127                 if (error)
128                         goto out;
129         }
130
131 success:
132         /*
133          * vm_flags is protected by the mmap_sem held in write mode.
134          */
135         vma->vm_flags = new_flags;
136
137 out:
138         if (error == -ENOMEM)
139                 error = -EAGAIN;
140         return error;
141 }
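
The function above may split the vma with split_vma() so that the new flags
cover exactly [start, end).  A minimal, hypothetical userspace sketch (not
part of madvise.c) of the visible effect: advice applied to the middle page
of a three-page anonymous mapping typically leaves the original vma split
into three entries in /proc/self/maps.

#include <sys/mman.h>
#include <unistd.h>

int example_split(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 3 * pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return -1;
        /* advice covers only the middle page; the kernel is expected to
         * split the single vma into three (see /proc/self/maps) */
        return madvise(p + pagesz, pagesz, MADV_DONTFORK);
}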
142
143 #ifdef CONFIG_SWAP
144 static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
145         unsigned long end, struct mm_walk *walk)
146 {
147         pte_t *orig_pte;
148         struct vm_area_struct *vma = walk->private;
149         unsigned long index;
150
151         if (pmd_none_or_trans_huge_or_clear_bad(pmd))
152                 return 0;
153
154         for (index = start; index != end; index += PAGE_SIZE) {
155                 pte_t pte;
156                 swp_entry_t entry;
157                 struct page *page;
158                 spinlock_t *ptl;
159
160                 orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
161                 pte = *(orig_pte + ((index - start) / PAGE_SIZE));
162                 pte_unmap_unlock(orig_pte, ptl);
163
164                 if (pte_present(pte) || pte_none(pte))
165                         continue;
166                 entry = pte_to_swp_entry(pte);
167                 if (unlikely(non_swap_entry(entry)))
168                         continue;
169
170                 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
171                                                                 vma, index);
172                 if (page)
173                         page_cache_release(page);
174         }
175
176         return 0;
177 }
178
179 static void force_swapin_readahead(struct vm_area_struct *vma,
180                 unsigned long start, unsigned long end)
181 {
182         struct mm_walk walk = {
183                 .mm = vma->vm_mm,
184                 .pmd_entry = swapin_walk_pmd_entry,
185                 .private = vma,
186         };
187
188         walk_page_range(start, end, &walk);
189
190         lru_add_drain();        /* Push any new pages onto the LRU now */
191 }
192
193 static void force_shm_swapin_readahead(struct vm_area_struct *vma,
194                 unsigned long start, unsigned long end,
195                 struct address_space *mapping)
196 {
197         pgoff_t index;
198         struct page *page;
199         swp_entry_t swap;
200
201         for (; start < end; start += PAGE_SIZE) {
202                 index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
203
204                 page = find_get_entry(mapping, index);
205                 if (!radix_tree_exceptional_entry(page)) {
206                         if (page)
207                                 page_cache_release(page);
208                         continue;
209                 }
210                 swap = radix_to_swp_entry(page);
211                 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
212                                                                 NULL, 0);
213                 if (page)
214                         page_cache_release(page);
215         }
216
217         lru_add_drain();        /* Push any new pages onto the LRU now */
218 }
219 #endif          /* CONFIG_SWAP */
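
force_shm_swapin_readahead() above converts each user virtual address into a
page-cache index with ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff
(madvise_willneed() below does the same for its readahead window).  A small
worked example of that arithmetic with made-up numbers, for illustration only:

#include <assert.h>

void offset_example(void)
{
        unsigned long vm_start = 0x7f0000010000UL;      /* hypothetical vma start */
        unsigned long vm_pgoff = 16;                    /* first page maps file page 16 */
        unsigned long addr = vm_start + 2 * 4096;       /* assumes 4K pages */
        unsigned long index = ((addr - vm_start) >> 12) + vm_pgoff;

        assert(index == 18);    /* two pages into the vma -> file page 18 */
}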
220
221 /*
222  * Schedule all required I/O operations.  Do not wait for completion.
223  */
224 static long madvise_willneed(struct vm_area_struct *vma,
225                              struct vm_area_struct **prev,
226                              unsigned long start, unsigned long end)
227 {
228         struct file *file = vma->vm_file;
229
230 #ifdef CONFIG_SWAP
231         if (!file) {
232                 *prev = vma;
233                 force_swapin_readahead(vma, start, end);
234                 return 0;
235         }
236
237         if (shmem_mapping(file->f_mapping)) {
238                 *prev = vma;
239                 force_shm_swapin_readahead(vma, start, end,
240                                         file->f_mapping);
241                 return 0;
242         }
243 #else
244         if (!file)
245                 return -EBADF;
246 #endif
247
248         if (IS_DAX(file_inode(file))) {
249                 /* no bad return value, but ignore advice */
250                 return 0;
251         }
252
253         *prev = vma;
254         start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
255         if (end > vma->vm_end)
256                 end = vma->vm_end;
257         end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
258
259         force_page_cache_readahead(file->f_mapping, file, start, end - start);
260         return 0;
261 }
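
madvise_willneed() only schedules the readahead I/O and returns without
waiting for it.  A hypothetical userspace sketch (prefault_file() is an
illustrative name, not an API defined here) of warming a file mapping with
MADV_WILLNEED before first access:

#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int prefault_file(const char *path, void **out, size_t *len)
{
        struct stat st;
        void *p;
        int fd = open(path, O_RDONLY);

        if (fd < 0)
                return -1;
        if (fstat(fd, &st) < 0) {
                close(fd);
                return -1;
        }
        p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
        close(fd);                      /* the mapping keeps its own reference */
        if (p == MAP_FAILED)
                return -1;
        /* schedules I/O only; the call returns before the pages arrive */
        madvise(p, st.st_size, MADV_WILLNEED);
        *out = p;
        *len = st.st_size;
        return 0;
}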
262
263 static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
264                                 unsigned long end, struct mm_walk *walk)
265
266 {
267         struct mmu_gather *tlb = walk->private;
268         struct mm_struct *mm = tlb->mm;
269         struct vm_area_struct *vma = walk->vma;
270         spinlock_t *ptl;
271         pte_t *pte, ptent;
272         struct page *page;
273         swp_entry_t entry;
274         unsigned long next;
275         int nr_swap = 0;
276
277         next = pmd_addr_end(addr, end);
278         if (pmd_trans_huge(*pmd)) {
279                 if (next - addr != HPAGE_PMD_SIZE)
280                         split_huge_pmd(vma, pmd, addr);
281                 else if (!madvise_free_huge_pmd(tlb, vma, pmd, addr))
282                         goto next;
283                 /* fall through */
284         }
285
286         if (pmd_trans_unstable(pmd))
287                 return 0;
288
289         pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
290         arch_enter_lazy_mmu_mode();
291         for (; addr != end; pte++, addr += PAGE_SIZE) {
292                 ptent = *pte;
293
294                 if (pte_none(ptent))
295                         continue;
296                 /*
297                  * If the pte holds a swap entry, just clear the page
298                  * table entry to prevent a swap-in, which is more
299                  * expensive than (page allocation + zeroing).
300                  */
301                 if (!pte_present(ptent)) {
302                         entry = pte_to_swp_entry(ptent);
303                         if (non_swap_entry(entry))
304                                 continue;
305                         nr_swap--;
306                         free_swap_and_cache(entry);
307                         pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
308                         continue;
309                 }
310
311                 page = vm_normal_page(vma, addr, ptent);
312                 if (!page)
313                         continue;
314
315                 if (PageSwapCache(page) || PageDirty(page)) {
316                         if (!trylock_page(page))
317                                 continue;
318                         /*
319                          * If the page is shared with others, we can't
320                          * clear PG_dirty of the page.
321                          */
322                         if (page_count(page) != 1 + !!PageSwapCache(page)) {
323                                 unlock_page(page);
324                                 continue;
325                         }
326
327                         if (PageSwapCache(page) && !try_to_free_swap(page)) {
328                                 unlock_page(page);
329                                 continue;
330                         }
331
332                         ClearPageDirty(page);
333                         unlock_page(page);
334                 }
335
336                 /*
337                  * Some architectures (e.g. PPC) don't update the TLB
338                  * with set_pte_at and tlb_remove_tlb_entry, so for
339                  * portability, re-install the pte as old and clean
340                  * after clearing it.
341                  */
342                 ptent = ptep_get_and_clear_full(mm, addr, pte,
343                                                 tlb->fullmm);
344                 ptent = pte_mkold(ptent);
345                 ptent = pte_mkclean(ptent);
346                 set_pte_at(mm, addr, pte, ptent);
347                 if (PageActive(page))
348                         deactivate_page(page);
349                 tlb_remove_tlb_entry(tlb, pte, addr);
350         }
351
352         if (nr_swap) {
353                 if (current->mm == mm)
354                         sync_mm_rss(mm);
355
356                 add_mm_counter(mm, MM_SWAPENTS, nr_swap);
357         }
358
359         arch_leave_lazy_mmu_mode();
360         pte_unmap_unlock(pte - 1, ptl);
361 next:
362         cond_resched();
363         return 0;
364 }
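
The check page_count(page) != 1 + !!PageSwapCache(page) above is the heart of
the dirty handling: the only references we expect are the single pte mapping
plus, possibly, the swap cache, and anything beyond that means the page is
shared, so PG_dirty must be kept.  A minimal sketch of the same arithmetic in
isolation, with hypothetical inputs:

/* one reference from the sole pte mapping, plus one if the page also
 * sits in the swap cache; anything more means the page is shared */
static inline int can_clear_dirty(int page_count, int in_swap_cache)
{
        return page_count == 1 + !!in_swap_cache;
}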
365
366 static void madvise_free_page_range(struct mmu_gather *tlb,
367                              struct vm_area_struct *vma,
368                              unsigned long addr, unsigned long end)
369 {
370         struct mm_walk free_walk = {
371                 .pmd_entry = madvise_free_pte_range,
372                 .mm = vma->vm_mm,
373                 .private = tlb,
374         };
375
376         BUG_ON(addr >= end);
377         tlb_start_vma(tlb, vma);
378         walk_page_range(addr, end, &free_walk);
379         tlb_end_vma(tlb, vma);
380 }
381
382 static int madvise_free_single_vma(struct vm_area_struct *vma,
383                         unsigned long start_addr, unsigned long end_addr)
384 {
385         unsigned long start, end;
386         struct mm_struct *mm = vma->vm_mm;
387         struct mmu_gather tlb;
388
389         if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
390                 return -EINVAL;
391
392         /* MADV_FREE works only for anonymous vmas at the moment */
393         if (!vma_is_anonymous(vma))
394                 return -EINVAL;
395
396         start = max(vma->vm_start, start_addr);
397         if (start >= vma->vm_end)
398                 return -EINVAL;
399         end = min(vma->vm_end, end_addr);
400         if (end <= vma->vm_start)
401                 return -EINVAL;
402
403         lru_add_drain();
404         tlb_gather_mmu(&tlb, mm, start, end);
405         update_hiwater_rss(mm);
406
407         mmu_notifier_invalidate_range_start(mm, start, end);
408         madvise_free_page_range(&tlb, vma, start, end);
409         mmu_notifier_invalidate_range_end(mm, start, end);
410         tlb_finish_mmu(&tlb, start, end);
411
412         return 0;
413 }
414
415 static long madvise_free(struct vm_area_struct *vma,
416                              struct vm_area_struct **prev,
417                              unsigned long start, unsigned long end)
418 {
419         *prev = vma;
420         return madvise_free_single_vma(vma, start, end);
421 }
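
MADV_FREE marks clean anonymous pages as lazily reclaimable: their contents
stay valid until the kernel actually needs the memory, and a later write to a
page cancels the advice for it.  A hypothetical userspace sketch
(cache_release() is an illustrative helper; MADV_FREE assumes a kernel and
libc recent enough to provide it, roughly Linux 4.5+):

#include <sys/mman.h>

void cache_release(void *buf, size_t len)
{
        /* contents may be discarded at any time after this call; the
         * caller must treat the buffer as undefined until rewritten */
        if (madvise(buf, len, MADV_FREE) != 0)
                madvise(buf, len, MADV_DONTNEED);       /* kernel without MADV_FREE */
}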
422
423 /*
424  * Application no longer needs these pages.  If the pages are dirty,
425  * it's OK to just throw them away.  The app will be more careful about
426  * data it wants to keep.  Be sure to free swap resources too.  The
427  * zap_page_range call sets things up for shrink_active_list to actually free
428  * these pages later if no one else has touched them in the meantime,
429  * although we could add these pages to a global reuse list for
430  * shrink_active_list to pick up before reclaiming other pages.
431  *
432  * NB: This interface discards data rather than pushes it out to swap,
433  * as some implementations do.  This has performance implications for
434  * applications like large transactional databases which want to discard
435  * pages in anonymous maps after committing to backing store the data
436  * that was kept in them.  There is no reason to write this data out to
437  * the swap area if the application is discarding it.
438  *
439  * An interface that causes the system to free clean pages and flush
440  * dirty pages is already available as msync(MS_INVALIDATE).
441  */
442 static long madvise_dontneed(struct vm_area_struct *vma,
443                              struct vm_area_struct **prev,
444                              unsigned long start, unsigned long end)
445 {
446         *prev = vma;
447         if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
448                 return -EINVAL;
449
450         zap_page_range(vma, start, end - start, NULL);
451         return 0;
452 }
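
For a private anonymous mapping, the zap_page_range() call above means the
next access after MADV_DONTNEED faults in fresh zero-filled pages.  A minimal
userspace sketch, for illustration only:

#include <assert.h>
#include <sys/mman.h>
#include <unistd.h>

void dontneed_demo(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        assert(p != MAP_FAILED);
        p[0] = 42;
        madvise(p, pagesz, MADV_DONTNEED);
        assert(p[0] == 0);      /* old data is gone; the page reads back as zero */
        munmap(p, pagesz);
}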
453
454 /*
455  * Application wants to free up the pages and associated backing store.
456  * This is effectively punching a hole into the middle of a file.
457  */
458 static long madvise_remove(struct vm_area_struct *vma,
459                                 struct vm_area_struct **prev,
460                                 unsigned long start, unsigned long end)
461 {
462         loff_t offset;
463         int error;
464         struct file *f;
465
466         *prev = NULL;   /* tell sys_madvise we drop mmap_sem */
467
468         if (vma->vm_flags & VM_LOCKED)
469                 return -EINVAL;
470
471         f = vma->vm_file;
472
473         if (!f || !f->f_mapping || !f->f_mapping->host) {
474                 return -EINVAL;
475         }
476
477         if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
478                 return -EACCES;
479
480         offset = (loff_t)(start - vma->vm_start)
481                         + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
482
483         /*
484          * Filesystem's fallocate may need to take i_mutex.  We need to
485          * explicitly grab a reference because the vma (and hence the
486          * vma's reference to the file) can go away as soon as we drop
487          * mmap_sem.
488          */
489         get_file(f);
490         up_read(&current->mm->mmap_sem);
491         error = vfs_fallocate(f,
492                                 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
493                                 offset, end - start);
494         fput(f);
495         down_read(&current->mm->mmap_sem);
496         return error;
497 }
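
MADV_REMOVE releases the backing store by hole-punching the file through
vfs_fallocate(), so it needs a shared, writable mapping on a filesystem that
supports FALLOC_FL_PUNCH_HOLE (tmpfs, for instance).  A hypothetical
userspace sketch (drop_page() is an illustrative helper):

#include <sys/mman.h>

/* release one page's worth of backing store in a MAP_SHARED file mapping;
 * a non-shared or read-only mapping gets -EACCES per the check above */
int drop_page(char *map_base, long pagesz, long page_index)
{
        return madvise(map_base + page_index * pagesz, pagesz, MADV_REMOVE);
}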
498
499 #ifdef CONFIG_MEMORY_FAILURE
500 /*
501  * Error injection support for memory error handling.
502  */
503 static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
504 {
505         struct page *p;
506         if (!capable(CAP_SYS_ADMIN))
507                 return -EPERM;
508         for (; start < end; start += PAGE_SIZE <<
509                                 compound_order(compound_head(p))) {
510                 int ret;
511
512                 ret = get_user_pages_fast(start, 1, 0, &p);
513                 if (ret != 1)
514                         return ret;
515
516                 if (PageHWPoison(p)) {
517                         put_page(p);
518                         continue;
519                 }
520                 if (bhv == MADV_SOFT_OFFLINE) {
521                         pr_info("Soft offlining page %#lx at %#lx\n",
522                                 page_to_pfn(p), start);
523                         ret = soft_offline_page(p, MF_COUNT_INCREASED);
524                         if (ret)
525                                 return ret;
526                         continue;
527                 }
528                 pr_info("Injecting memory failure for page %#lx at %#lx\n",
529                        page_to_pfn(p), start);
530                 /* Ignore return value for now */
531                 memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
532         }
533         return 0;
534 }
535 #endif
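
MADV_HWPOISON and MADV_SOFT_OFFLINE are error-injection hooks for testing the
memory-failure code: they require CAP_SYS_ADMIN and a kernel built with
CONFIG_MEMORY_FAILURE.  A hypothetical, test-only sketch; after poisoning, a
later access to the page is expected to deliver SIGBUS:

#include <sys/mman.h>
#include <unistd.h>

int poison_one_page(void *addr)
{
        long pagesz = sysconf(_SC_PAGESIZE);

        /* needs CAP_SYS_ADMIN; EPERM otherwise, per madvise_hwpoison() */
        return madvise(addr, pagesz, MADV_HWPOISON);
}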
536
537 static long
538 madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
539                 unsigned long start, unsigned long end, int behavior)
540 {
541         switch (behavior) {
542         case MADV_REMOVE:
543                 return madvise_remove(vma, prev, start, end);
544         case MADV_WILLNEED:
545                 return madvise_willneed(vma, prev, start, end);
546         case MADV_FREE:
547                 /*
548                  * XXX: In this implementation, MADV_FREE works like
549                  * MADV_DONTNEED on a swapless system or when swap is full.
550                  */
551                 if (get_nr_swap_pages() > 0)
552                         return madvise_free(vma, prev, start, end);
553                 /* fall through */
554         case MADV_DONTNEED:
555                 return madvise_dontneed(vma, prev, start, end);
556         default:
557                 return madvise_behavior(vma, prev, start, end, behavior);
558         }
559 }
560
561 static bool
562 madvise_behavior_valid(int behavior)
563 {
564         switch (behavior) {
565         case MADV_DOFORK:
566         case MADV_DONTFORK:
567         case MADV_NORMAL:
568         case MADV_SEQUENTIAL:
569         case MADV_RANDOM:
570         case MADV_REMOVE:
571         case MADV_WILLNEED:
572         case MADV_DONTNEED:
573         case MADV_FREE:
574 #ifdef CONFIG_KSM
575         case MADV_MERGEABLE:
576         case MADV_UNMERGEABLE:
577 #endif
578 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
579         case MADV_HUGEPAGE:
580         case MADV_NOHUGEPAGE:
581 #endif
582         case MADV_DONTDUMP:
583         case MADV_DODUMP:
584                 return true;
585
586         default:
587                 return false;
588         }
589 }
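
Advice values compiled out of the kernel (for example MADV_HUGEPAGE without
CONFIG_TRANSPARENT_HUGEPAGE) fail this validity check and come back as
EINVAL, so callers commonly treat that case as a soft failure.  A
hypothetical userspace sketch:

#include <errno.h>
#include <sys/mman.h>

int try_hugepage(void *addr, size_t len)
{
        if (madvise(addr, len, MADV_HUGEPAGE) == 0)
                return 1;                       /* advice accepted */
        return errno == EINVAL ? 0 : -1;        /* unsupported vs. real error */
}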
590
591 /*
592  * The madvise(2) system call.
593  *
594  * Applications can use madvise() to advise the kernel how it should
595  * handle paging I/O in this VM area.  The idea is to help the kernel
596  * use appropriate read-ahead and caching techniques.  The information
597  * provided is advisory only, and can be safely disregarded by the
598  * kernel without affecting the correct operation of the application.
599  *
600  * behavior values:
601  *  MADV_NORMAL - the default behavior is to read clusters.  This
602  *              results in some read-ahead and read-behind.
603  *  MADV_RANDOM - the system should read the minimum amount of data
604  *              on any access, since it is unlikely that the appli-
605  *              on any access, since it is unlikely that the
606  *              application will need more than what it asks for.
607  *              once, so they can be aggressively read ahead, and
608  *              can be freed soon after they are accessed.
609  *  MADV_WILLNEED - the application is notifying the system to read
610  *              some pages ahead.
611  *  MADV_DONTNEED - the application is finished with the given range,
612  *              so the kernel can free resources associated with it.
613  *  MADV_REMOVE - the application wants to free up the given range of
614  *              pages and associated backing store.
615  *  MADV_DONTFORK - omit this area from child's address space when forking:
616  *              typically, to avoid COWing pages pinned by get_user_pages().
617  *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
618  *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
619  *              this area with pages of identical content from other such areas.
620  *  MADV_UNMERGEABLE - cancel MADV_MERGEABLE: no longer merge pages with others.
621  *
622  * return values:
623  *  zero    - success
624  *  -EINVAL - start + len < 0, start is not page-aligned,
625  *              "behavior" is not a valid value, or application
626  *              is attempting to release locked or shared pages.
627  *  -ENOMEM - addresses in the specified range are not currently
628  *              mapped, or are outside the AS of the process.
629  *  -EIO    - an I/O error occurred while paging in data.
630  *  -EBADF  - map exists, but area maps something that isn't a file.
631  *  -EAGAIN - a kernel resource was temporarily unavailable.
632  */
633 SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
634 {
635         unsigned long end, tmp;
636         struct vm_area_struct *vma, *prev;
637         int unmapped_error = 0;
638         int error = -EINVAL;
639         int write;
640         size_t len;
641         struct blk_plug plug;
642
643 #ifdef CONFIG_MEMORY_FAILURE
644         if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
645                 return madvise_hwpoison(behavior, start, start+len_in);
646 #endif
647         if (!madvise_behavior_valid(behavior))
648                 return error;
649
650         if (start & ~PAGE_MASK)
651                 return error;
652         len = (len_in + ~PAGE_MASK) & PAGE_MASK;
653
654         /* Check whether len was rounded up from a small negative value to zero */
655         if (len_in && !len)
656                 return error;
657
658         end = start + len;
659         if (end < start)
660                 return error;
661
662         error = 0;
663         if (end == start)
664                 return error;
665
666         write = madvise_need_mmap_write(behavior);
667         if (write)
668                 down_write(&current->mm->mmap_sem);
669         else
670                 down_read(&current->mm->mmap_sem);
671
672         /*
673          * If the interval [start,end) covers some unmapped address
674          * ranges, just ignore them, but return -ENOMEM at the end.
675          * - different from the way of handling in mlock etc.
676          */
677         vma = find_vma_prev(current->mm, start, &prev);
678         if (vma && start > vma->vm_start)
679                 prev = vma;
680
681         blk_start_plug(&plug);
682         for (;;) {
683                 /* Still start < end. */
684                 error = -ENOMEM;
685                 if (!vma)
686                         goto out;
687
688                 /* Here start < (end|vma->vm_end). */
689                 if (start < vma->vm_start) {
690                         unmapped_error = -ENOMEM;
691                         start = vma->vm_start;
692                         if (start >= end)
693                                 goto out;
694                 }
695
696                 /* Here vma->vm_start <= start < (end|vma->vm_end) */
697                 tmp = vma->vm_end;
698                 if (end < tmp)
699                         tmp = end;
700
701                 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
702                 error = madvise_vma(vma, &prev, start, tmp, behavior);
703                 if (error)
704                         goto out;
705                 start = tmp;
706                 if (prev && start < prev->vm_end)
707                         start = prev->vm_end;
708                 error = unmapped_error;
709                 if (start >= end)
710                         goto out;
711                 if (prev)
712                         vma = prev->vm_next;
713                 else    /* madvise_remove dropped mmap_sem */
714                         vma = find_vma(current->mm, start);
715         }
716 out:
717         blk_finish_plug(&plug);
718         if (write)
719                 up_write(&current->mm->mmap_sem);
720         else
721                 up_read(&current->mm->mmap_sem);
722
723         return error;
724 }
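
Per the loop above, unmapped holes inside [start, end) are skipped but
remembered in unmapped_error, so the advice is still applied to the mapped
parts and the call returns -ENOMEM.  A minimal userspace sketch of that
behaviour, for illustration only:

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, 3 * pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        munmap(p + pagesz, pagesz);     /* punch an unmapped hole in the range */
        if (madvise(p, 3 * pagesz, MADV_DONTNEED) < 0 && errno == ENOMEM)
                printf("advice applied to mapped parts, hole reported as ENOMEM\n");
        return 0;
}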