1 /*
2  *  linux/mm/memory.c
3  *
4  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
5  */
6
7 /*
8  * demand-loading started 01.12.91 - seems it is high on the list of
9  * things wanted, and it should be easy to implement. - Linus
10  */
11
12 /*
13  * Ok, demand-loading was easy, shared pages a little bit trickier. Shared
14  * pages started 02.12.91, seems to work. - Linus.
15  *
16  * Tested sharing by executing about 30 /bin/sh: under the old kernel it
17  * would have taken more than the 6M I have free, but it worked well as
18  * far as I could see.
19  *
20  * Also corrected some "invalidate()"s - I wasn't doing enough of them.
21  */
22
23 /*
24  * Real VM (paging to/from disk) started 18.12.91. Much more work and
25  * thought has to go into this. Oh, well..
26  * 19.12.91  -  works, somewhat. Sometimes I get faults, don't know why.
27  *              Found it. Everything seems to work now.
28  * 20.12.91  -  Ok, making the swap-device changeable like the root.
29  */
30
31 /*
32  * 05.04.94  -  Multi-page memory management added for v1.1.
33  *              Idea by Alex Bligh (alex@cconcepts.co.uk)
34  *
35  * 16.07.99  -  Support of BIGMEM added by Gerhard Wichert, Siemens AG
36  *              (Gerhard.Wichert@pdb.siemens.de)
37  *
38  * Aug/Sep 2004 Changed to four level page tables (Andi Kleen)
39  */
40
41 #include <linux/kernel_stat.h>
42 #include <linux/mm.h>
43 #include <linux/hugetlb.h>
44 #include <linux/mman.h>
45 #include <linux/swap.h>
46 #include <linux/highmem.h>
47 #include <linux/pagemap.h>
48 #include <linux/rmap.h>
49 #include <linux/module.h>
50 #include <linux/init.h>
51
52 #include <asm/pgalloc.h>
53 #include <asm/uaccess.h>
54 #include <asm/tlb.h>
55 #include <asm/tlbflush.h>
56 #include <asm/pgtable.h>
57
58 #include <linux/swapops.h>
59 #include <linux/elf.h>
60
61 #ifndef CONFIG_NEED_MULTIPLE_NODES
62 /* use the per-pgdat data instead for discontigmem - mbligh */
63 unsigned long max_mapnr;
64 struct page *mem_map;
65
66 EXPORT_SYMBOL(max_mapnr);
67 EXPORT_SYMBOL(mem_map);
68 #endif
69
70 unsigned long num_physpages;
71 /*
72  * A number of key systems in x86 including ioremap() rely on the assumption
73  * that high_memory defines the upper bound on direct map memory, the end
74  * of ZONE_NORMAL.  Under CONFIG_DISCONTIG this means that max_low_pfn and
75  * highstart_pfn must be the same; there must be no gap between ZONE_NORMAL
76  * and ZONE_HIGHMEM.
77  */
78 void * high_memory;
79 unsigned long vmalloc_earlyreserve;
80
81 EXPORT_SYMBOL(num_physpages);
82 EXPORT_SYMBOL(high_memory);
83 EXPORT_SYMBOL(vmalloc_earlyreserve);
84
85 /*
86  * If a p?d_bad entry is found while walking page tables, report
87  * the error, before resetting entry to p?d_none.  Usually (but
88  * very seldom) called out from the p?d_none_or_clear_bad macros.
89  */
90
91 void pgd_clear_bad(pgd_t *pgd)
92 {
93         pgd_ERROR(*pgd);
94         pgd_clear(pgd);
95 }
96
97 void pud_clear_bad(pud_t *pud)
98 {
99         pud_ERROR(*pud);
100         pud_clear(pud);
101 }
102
103 void pmd_clear_bad(pmd_t *pmd)
104 {
105         pmd_ERROR(*pmd);
106         pmd_clear(pmd);
107 }
108
109 /*
110  * Note: this doesn't free the actual pages themselves. That
111  * has been handled earlier when unmapping all the memory regions.
112  */
113 static void free_pte_range(struct mmu_gather *tlb, pmd_t *pmd)
114 {
115         struct page *page = pmd_page(*pmd);
116         pmd_clear(pmd);
117         pte_free_tlb(tlb, page);
118         dec_page_state(nr_page_table_pages);
119         tlb->mm->nr_ptes--;
120 }
121
122 static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
123                                 unsigned long addr, unsigned long end,
124                                 unsigned long floor, unsigned long ceiling)
125 {
126         pmd_t *pmd;
127         unsigned long next;
128         unsigned long start;
129
130         start = addr;
131         pmd = pmd_offset(pud, addr);
132         do {
133                 next = pmd_addr_end(addr, end);
134                 if (pmd_none_or_clear_bad(pmd))
135                         continue;
136                 free_pte_range(tlb, pmd);
137         } while (pmd++, addr = next, addr != end);
138
139         start &= PUD_MASK;
140         if (start < floor)
141                 return;
142         if (ceiling) {
143                 ceiling &= PUD_MASK;
144                 if (!ceiling)
145                         return;
146         }
147         if (end - 1 > ceiling - 1)
148                 return;
149
150         pmd = pmd_offset(pud, start);
151         pud_clear(pud);
152         pmd_free_tlb(tlb, pmd);
153 }
154
155 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
156                                 unsigned long addr, unsigned long end,
157                                 unsigned long floor, unsigned long ceiling)
158 {
159         pud_t *pud;
160         unsigned long next;
161         unsigned long start;
162
163         start = addr;
164         pud = pud_offset(pgd, addr);
165         do {
166                 next = pud_addr_end(addr, end);
167                 if (pud_none_or_clear_bad(pud))
168                         continue;
169                 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
170         } while (pud++, addr = next, addr != end);
171
172         start &= PGDIR_MASK;
173         if (start < floor)
174                 return;
175         if (ceiling) {
176                 ceiling &= PGDIR_MASK;
177                 if (!ceiling)
178                         return;
179         }
180         if (end - 1 > ceiling - 1)
181                 return;
182
183         pud = pud_offset(pgd, start);
184         pgd_clear(pgd);
185         pud_free_tlb(tlb, pud);
186 }
187
188 /*
189  * This function frees user-level page tables of a process.
190  *
191  * Must be called with pagetable lock held.
192  */
193 void free_pgd_range(struct mmu_gather **tlb,
194                         unsigned long addr, unsigned long end,
195                         unsigned long floor, unsigned long ceiling)
196 {
197         pgd_t *pgd;
198         unsigned long next;
199         unsigned long start;
200
201         /*
202          * The next few lines have given us lots of grief...
203          *
204          * Why are we testing PMD* at this top level?  Because often
205          * there will be no work to do at all, and we'd prefer not to
206          * go all the way down to the bottom just to discover that.
207          *
208          * Why all these "- 1"s?  Because 0 represents both the bottom
209          * of the address space and the top of it (using -1 for the
210          * top wouldn't help much: the masks would do the wrong thing).
211          * The rule is that addr 0 and floor 0 refer to the bottom of
212          * the address space, but end 0 and ceiling 0 refer to the top
213          * Comparisons need to use "end - 1" and "ceiling - 1" (though
214          * that end 0 case should be mythical).
215          *
216          * Wherever addr is brought up or ceiling brought down, we must
217          * be careful to reject "the opposite 0" before it confuses the
218          * subsequent tests.  But what about where end is brought down
219  * by PMD_SIZE below? No, end can't go down to 0 there.
220          *
221          * Whereas we round start (addr) and ceiling down, by different
222          * masks at different levels, in order to test whether a table
223          * now has no other vmas using it, so can be freed, we don't
224          * bother to round floor or end up - the tests don't need that.
225          */
226
227         addr &= PMD_MASK;
228         if (addr < floor) {
229                 addr += PMD_SIZE;
230                 if (!addr)
231                         return;
232         }
233         if (ceiling) {
234                 ceiling &= PMD_MASK;
235                 if (!ceiling)
236                         return;
237         }
238         if (end - 1 > ceiling - 1)
239                 end -= PMD_SIZE;
240         if (addr > end - 1)
241                 return;
242
243         start = addr;
244         pgd = pgd_offset((*tlb)->mm, addr);
245         do {
246                 next = pgd_addr_end(addr, end);
247                 if (pgd_none_or_clear_bad(pgd))
248                         continue;
249                 free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
250         } while (pgd++, addr = next, addr != end);
251
252         if (!(*tlb)->fullmm)
253                 flush_tlb_pgtables((*tlb)->mm, start, end);
254 }
255
256 void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
257                 unsigned long floor, unsigned long ceiling)
258 {
259         while (vma) {
260                 struct vm_area_struct *next = vma->vm_next;
261                 unsigned long addr = vma->vm_start;
262
263                 if (is_hugepage_only_range(vma->vm_mm, addr, HPAGE_SIZE)) {
264                         hugetlb_free_pgd_range(tlb, addr, vma->vm_end,
265                                 floor, next? next->vm_start: ceiling);
266                 } else {
267                         /*
268                          * Optimization: gather nearby vmas into one call down
269                          */
270                         while (next && next->vm_start <= vma->vm_end + PMD_SIZE
271                           && !is_hugepage_only_range(vma->vm_mm, next->vm_start,
272                                                         HPAGE_SIZE)) {
273                                 vma = next;
274                                 next = vma->vm_next;
275                         }
276                         free_pgd_range(tlb, addr, vma->vm_end,
277                                 floor, next? next->vm_start: ceiling);
278                 }
279                 vma = next;
280         }
281 }
282
283 int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address)
284 {
285         struct page *new;
286
287         spin_unlock(&mm->page_table_lock);
288         new = pte_alloc_one(mm, address);
289         spin_lock(&mm->page_table_lock);
290         if (!new)
291                 return -ENOMEM;
292
293         if (pmd_present(*pmd))          /* Another has populated it */
294                 pte_free(new);
295         else {
296                 mm->nr_ptes++;
297                 inc_page_state(nr_page_table_pages);
298                 pmd_populate(mm, pmd, new);
299         }
300         return 0;
301 }
302
303 int __pte_alloc_kernel(pmd_t *pmd, unsigned long address)
304 {
305         pte_t *new = pte_alloc_one_kernel(&init_mm, address);
306         if (!new)
307                 return -ENOMEM;
308
309         spin_lock(&init_mm.page_table_lock);
310         if (pmd_present(*pmd))          /* Another has populated it */
311                 pte_free_kernel(new);
312         else
313                 pmd_populate_kernel(&init_mm, pmd, new);
314         spin_unlock(&init_mm.page_table_lock);
315         return 0;
316 }
317
318 static inline void add_mm_rss(struct mm_struct *mm, int file_rss, int anon_rss)
319 {
320         if (file_rss)
321                 add_mm_counter(mm, file_rss, file_rss);
322         if (anon_rss)
323                 add_mm_counter(mm, anon_rss, anon_rss);
324 }
325
326 /*
327  * This function is called to print an error when a pte in a
328  * !VM_RESERVED region is found pointing to an invalid pfn (which
329  * is an error).
330  *
331  * The calling function must still handle the error.
332  */
333 void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
334 {
335         printk(KERN_ERR "Bad pte = %08llx, process = %s, "
336                         "vm_flags = %lx, vaddr = %lx\n",
337                 (long long)pte_val(pte),
338                 (vma->vm_mm == current->mm ? current->comm : "???"),
339                 vma->vm_flags, vaddr);
340         dump_stack();
341 }
342
343 /*
344  * Copy one vm_area from one task to the other. Assumes that the page tables
345  * already present in the new task have been cleared in the whole range
346  * covered by this vma.
347  *
348  * dst->page_table_lock is held on entry and exit,
349  * but may be dropped within p[mg]d_alloc() and pte_alloc_map().
350  */
351
352 static inline void
353 copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
354                 pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma,
355                 unsigned long addr, int *rss)
356 {
357         unsigned long vm_flags = vma->vm_flags;
358         pte_t pte = *src_pte;
359         struct page *page;
360         unsigned long pfn;
361
362         /* pte contains position in swap or file, so copy. */
363         if (unlikely(!pte_present(pte))) {
364                 if (!pte_file(pte)) {
365                         swap_duplicate(pte_to_swp_entry(pte));
366                         /* make sure dst_mm is on swapoff's mmlist. */
367                         if (unlikely(list_empty(&dst_mm->mmlist))) {
368                                 spin_lock(&mmlist_lock);
369                                 list_add(&dst_mm->mmlist, &src_mm->mmlist);
370                                 spin_unlock(&mmlist_lock);
371                         }
372                 }
373                 goto out_set_pte;
374         }
375
376         /* If the region is VM_RESERVED, the mapping is not
377          * mapped via rmap - duplicate the pte as is.
378          */
379         if (vm_flags & VM_RESERVED)
380                 goto out_set_pte;
381
382         pfn = pte_pfn(pte);
383         /* If the pte points outside of valid memory but
384          * the region is not VM_RESERVED, we have a problem.
385          */
386         if (unlikely(!pfn_valid(pfn))) {
387                 print_bad_pte(vma, pte, addr);
388                 goto out_set_pte; /* try to do something sane */
389         }
390
391         page = pfn_to_page(pfn);
392
393         /*
394          * If it's a COW mapping, write protect it both
395          * in the parent and the child
396          */
397         if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
398                 ptep_set_wrprotect(src_mm, addr, src_pte);
399                 pte = *src_pte;
400         }
401
402         /*
403          * If it's a shared mapping, mark it clean in
404          * the child
405          */
406         if (vm_flags & VM_SHARED)
407                 pte = pte_mkclean(pte);
408         pte = pte_mkold(pte);
409         get_page(page);
410         page_dup_rmap(page);
411         rss[!!PageAnon(page)]++;
412
413 out_set_pte:
414         set_pte_at(dst_mm, addr, dst_pte, pte);
415 }
416
417 static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
418                 pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma,
419                 unsigned long addr, unsigned long end)
420 {
421         pte_t *src_pte, *dst_pte;
422         int progress = 0;
423         int rss[2];
424
425 again:
426         rss[1] = rss[0] = 0;
427         dst_pte = pte_alloc_map(dst_mm, dst_pmd, addr);
428         if (!dst_pte)
429                 return -ENOMEM;
430         src_pte = pte_offset_map_nested(src_pmd, addr);
431
432         spin_lock(&src_mm->page_table_lock);
433         do {
434                 /*
435                  * We are holding two locks at this point - either of them
436                  * could generate latencies in another task on another CPU.
437                  */
438                 if (progress >= 32) {
439                         progress = 0;
440                         if (need_resched() ||
441                             need_lockbreak(&src_mm->page_table_lock) ||
442                             need_lockbreak(&dst_mm->page_table_lock))
443                                 break;
444                 }
445                 if (pte_none(*src_pte)) {
446                         progress++;
447                         continue;
448                 }
449                 copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, vma, addr, rss);
450                 progress += 8;
451         } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
452         spin_unlock(&src_mm->page_table_lock);
453
454         pte_unmap_nested(src_pte - 1);
455         pte_unmap(dst_pte - 1);
456         add_mm_rss(dst_mm, rss[0], rss[1]);
457         cond_resched_lock(&dst_mm->page_table_lock);
458         if (addr != end)
459                 goto again;
460         return 0;
461 }
462
463 static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
464                 pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma,
465                 unsigned long addr, unsigned long end)
466 {
467         pmd_t *src_pmd, *dst_pmd;
468         unsigned long next;
469
470         dst_pmd = pmd_alloc(dst_mm, dst_pud, addr);
471         if (!dst_pmd)
472                 return -ENOMEM;
473         src_pmd = pmd_offset(src_pud, addr);
474         do {
475                 next = pmd_addr_end(addr, end);
476                 if (pmd_none_or_clear_bad(src_pmd))
477                         continue;
478                 if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd,
479                                                 vma, addr, next))
480                         return -ENOMEM;
481         } while (dst_pmd++, src_pmd++, addr = next, addr != end);
482         return 0;
483 }
484
485 static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
486                 pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma,
487                 unsigned long addr, unsigned long end)
488 {
489         pud_t *src_pud, *dst_pud;
490         unsigned long next;
491
492         dst_pud = pud_alloc(dst_mm, dst_pgd, addr);
493         if (!dst_pud)
494                 return -ENOMEM;
495         src_pud = pud_offset(src_pgd, addr);
496         do {
497                 next = pud_addr_end(addr, end);
498                 if (pud_none_or_clear_bad(src_pud))
499                         continue;
500                 if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud,
501                                                 vma, addr, next))
502                         return -ENOMEM;
503         } while (dst_pud++, src_pud++, addr = next, addr != end);
504         return 0;
505 }
506
507 int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
508                 struct vm_area_struct *vma)
509 {
510         pgd_t *src_pgd, *dst_pgd;
511         unsigned long next;
512         unsigned long addr = vma->vm_start;
513         unsigned long end = vma->vm_end;
514
515         /*
516          * Don't copy ptes where a page fault will fill them correctly.
517          * Fork becomes much lighter when there are big shared or private
518          * readonly mappings. The tradeoff is that copy_page_range is more
519          * efficient than faulting.
520          */
521         if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
522                 if (!vma->anon_vma)
523                         return 0;
524         }
525
526         if (is_vm_hugetlb_page(vma))
527                 return copy_hugetlb_page_range(dst_mm, src_mm, vma);
528
529         dst_pgd = pgd_offset(dst_mm, addr);
530         src_pgd = pgd_offset(src_mm, addr);
531         do {
532                 next = pgd_addr_end(addr, end);
533                 if (pgd_none_or_clear_bad(src_pgd))
534                         continue;
535                 if (copy_pud_range(dst_mm, src_mm, dst_pgd, src_pgd,
536                                                 vma, addr, next))
537                         return -ENOMEM;
538         } while (dst_pgd++, src_pgd++, addr = next, addr != end);
539         return 0;
540 }
541
542 static void zap_pte_range(struct mmu_gather *tlb,
543                                 struct vm_area_struct *vma, pmd_t *pmd,
544                                 unsigned long addr, unsigned long end,
545                                 struct zap_details *details)
546 {
547         struct mm_struct *mm = tlb->mm;
548         pte_t *pte;
549         int file_rss = 0;
550         int anon_rss = 0;
551
552         pte = pte_offset_map(pmd, addr);
553         do {
554                 pte_t ptent = *pte;
555                 if (pte_none(ptent))
556                         continue;
557                 if (pte_present(ptent)) {
558                         struct page *page = NULL;
559                         if (!(vma->vm_flags & VM_RESERVED)) {
560                                 unsigned long pfn = pte_pfn(ptent);
561                                 if (unlikely(!pfn_valid(pfn)))
562                                         print_bad_pte(vma, ptent, addr);
563                                 else
564                                         page = pfn_to_page(pfn);
565                         }
566                         if (unlikely(details) && page) {
567                                 /*
568                                  * unmap_shared_mapping_pages() wants to
569                                  * invalidate cache without truncating:
570                                  * unmap shared but keep private pages.
571                                  */
572                                 if (details->check_mapping &&
573                                     details->check_mapping != page->mapping)
574                                         continue;
575                                 /*
576                                  * Each page->index must be checked when
577                                  * invalidating or truncating nonlinear.
578                                  */
579                                 if (details->nonlinear_vma &&
580                                     (page->index < details->first_index ||
581                                      page->index > details->last_index))
582                                         continue;
583                         }
584                         ptent = ptep_get_and_clear_full(mm, addr, pte,
585                                                         tlb->fullmm);
586                         tlb_remove_tlb_entry(tlb, pte, addr);
587                         if (unlikely(!page))
588                                 continue;
589                         if (unlikely(details) && details->nonlinear_vma
590                             && linear_page_index(details->nonlinear_vma,
591                                                 addr) != page->index)
592                                 set_pte_at(mm, addr, pte,
593                                            pgoff_to_pte(page->index));
594                         if (PageAnon(page))
595                                 anon_rss--;
596                         else {
597                                 if (pte_dirty(ptent))
598                                         set_page_dirty(page);
599                                 if (pte_young(ptent))
600                                         mark_page_accessed(page);
601                                 file_rss--;
602                         }
603                         page_remove_rmap(page);
604                         tlb_remove_page(tlb, page);
605                         continue;
606                 }
607                 /*
608                  * If details->check_mapping, we leave swap entries;
609                  * if details->nonlinear_vma, we leave file entries.
610                  */
611                 if (unlikely(details))
612                         continue;
613                 if (!pte_file(ptent))
614                         free_swap_and_cache(pte_to_swp_entry(ptent));
615                 pte_clear_full(mm, addr, pte, tlb->fullmm);
616         } while (pte++, addr += PAGE_SIZE, addr != end);
617
618         add_mm_rss(mm, file_rss, anon_rss);
619         pte_unmap(pte - 1);
620 }
621
622 static inline void zap_pmd_range(struct mmu_gather *tlb,
623                                 struct vm_area_struct *vma, pud_t *pud,
624                                 unsigned long addr, unsigned long end,
625                                 struct zap_details *details)
626 {
627         pmd_t *pmd;
628         unsigned long next;
629
630         pmd = pmd_offset(pud, addr);
631         do {
632                 next = pmd_addr_end(addr, end);
633                 if (pmd_none_or_clear_bad(pmd))
634                         continue;
635                 zap_pte_range(tlb, vma, pmd, addr, next, details);
636         } while (pmd++, addr = next, addr != end);
637 }
638
639 static inline void zap_pud_range(struct mmu_gather *tlb,
640                                 struct vm_area_struct *vma, pgd_t *pgd,
641                                 unsigned long addr, unsigned long end,
642                                 struct zap_details *details)
643 {
644         pud_t *pud;
645         unsigned long next;
646
647         pud = pud_offset(pgd, addr);
648         do {
649                 next = pud_addr_end(addr, end);
650                 if (pud_none_or_clear_bad(pud))
651                         continue;
652                 zap_pmd_range(tlb, vma, pud, addr, next, details);
653         } while (pud++, addr = next, addr != end);
654 }
655
656 static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
657                                 unsigned long addr, unsigned long end,
658                                 struct zap_details *details)
659 {
660         pgd_t *pgd;
661         unsigned long next;
662
663         if (details && !details->check_mapping && !details->nonlinear_vma)
664                 details = NULL;
665
666         BUG_ON(addr >= end);
667         tlb_start_vma(tlb, vma);
668         pgd = pgd_offset(vma->vm_mm, addr);
669         do {
670                 next = pgd_addr_end(addr, end);
671                 if (pgd_none_or_clear_bad(pgd))
672                         continue;
673                 zap_pud_range(tlb, vma, pgd, addr, next, details);
674         } while (pgd++, addr = next, addr != end);
675         tlb_end_vma(tlb, vma);
676 }
677
678 #ifdef CONFIG_PREEMPT
679 # define ZAP_BLOCK_SIZE (8 * PAGE_SIZE)
680 #else
681 /* No preempt: go for improved straight-line efficiency */
682 # define ZAP_BLOCK_SIZE (1024 * PAGE_SIZE)
683 #endif
684
685 /**
686  * unmap_vmas - unmap a range of memory covered by a list of vma's
687  * @tlbp: address of the caller's struct mmu_gather
688  * @mm: the controlling mm_struct
689  * @vma: the starting vma
690  * @start_addr: virtual address at which to start unmapping
691  * @end_addr: virtual address at which to end unmapping
692  * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
693  * @details: details of nonlinear truncation or shared cache invalidation
694  *
695  * Returns the end address of the unmapping (restart addr if interrupted).
696  *
697  * Unmap all pages in the vma list.  Called under page_table_lock.
698  *
699  * We aim to not hold page_table_lock for too long (for scheduling latency
700  * reasons).  So zap pages in ZAP_BLOCK_SIZE bytecounts.  This means we need to
701  * return the ending mmu_gather to the caller.
702  *
703  * Only addresses between `start' and `end' will be unmapped.
704  *
705  * The VMA list must be sorted in ascending virtual address order.
706  *
707  * unmap_vmas() assumes that the caller will flush the whole unmapped address
708  * range after unmap_vmas() returns.  So the only responsibility here is to
709  * ensure that any thus-far unmapped pages are flushed before unmap_vmas()
710  * drops the lock and schedules.
711  */
712 unsigned long unmap_vmas(struct mmu_gather **tlbp, struct mm_struct *mm,
713                 struct vm_area_struct *vma, unsigned long start_addr,
714                 unsigned long end_addr, unsigned long *nr_accounted,
715                 struct zap_details *details)
716 {
717         unsigned long zap_bytes = ZAP_BLOCK_SIZE;
718         unsigned long tlb_start = 0;    /* For tlb_finish_mmu */
719         int tlb_start_valid = 0;
720         unsigned long start = start_addr;
721         spinlock_t *i_mmap_lock = details? details->i_mmap_lock: NULL;
722         int fullmm = (*tlbp)->fullmm;
723
724         for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) {
725                 unsigned long end;
726
727                 start = max(vma->vm_start, start_addr);
728                 if (start >= vma->vm_end)
729                         continue;
730                 end = min(vma->vm_end, end_addr);
731                 if (end <= vma->vm_start)
732                         continue;
733
734                 if (vma->vm_flags & VM_ACCOUNT)
735                         *nr_accounted += (end - start) >> PAGE_SHIFT;
736
737                 while (start != end) {
738                         unsigned long block;
739
740                         if (!tlb_start_valid) {
741                                 tlb_start = start;
742                                 tlb_start_valid = 1;
743                         }
744
745                         if (is_vm_hugetlb_page(vma)) {
746                                 block = end - start;
747                                 unmap_hugepage_range(vma, start, end);
748                         } else {
749                                 block = min(zap_bytes, end - start);
750                                 unmap_page_range(*tlbp, vma, start,
751                                                 start + block, details);
752                         }
753
754                         start += block;
755                         zap_bytes -= block;
756                         if ((long)zap_bytes > 0)
757                                 continue;
758
759                         tlb_finish_mmu(*tlbp, tlb_start, start);
760
761                         if (need_resched() ||
762                                 need_lockbreak(&mm->page_table_lock) ||
763                                 (i_mmap_lock && need_lockbreak(i_mmap_lock))) {
764                                 if (i_mmap_lock) {
765                                         /* must reset count of rss freed */
766                                         *tlbp = tlb_gather_mmu(mm, fullmm);
767                                         goto out;
768                                 }
769                                 spin_unlock(&mm->page_table_lock);
770                                 cond_resched();
771                                 spin_lock(&mm->page_table_lock);
772                         }
773
774                         *tlbp = tlb_gather_mmu(mm, fullmm);
775                         tlb_start_valid = 0;
776                         zap_bytes = ZAP_BLOCK_SIZE;
777                 }
778         }
779 out:
780         return start;   /* which is now the end (or restart) address */
781 }
782
783 /**
784  * zap_page_range - remove user pages in a given range
785  * @vma: vm_area_struct holding the applicable pages
786  * @address: starting address of pages to zap
787  * @size: number of bytes to zap
788  * @details: details of nonlinear truncation or shared cache invalidation
789  */
790 unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
791                 unsigned long size, struct zap_details *details)
792 {
793         struct mm_struct *mm = vma->vm_mm;
794         struct mmu_gather *tlb;
795         unsigned long end = address + size;
796         unsigned long nr_accounted = 0;
797
798         if (is_vm_hugetlb_page(vma)) {
799                 zap_hugepage_range(vma, address, size);
800                 return end;
801         }
802
803         lru_add_drain();
804         spin_lock(&mm->page_table_lock);
805         tlb = tlb_gather_mmu(mm, 0);
806         update_hiwater_rss(mm);
807         end = unmap_vmas(&tlb, mm, vma, address, end, &nr_accounted, details);
808         tlb_finish_mmu(tlb, address, end);
809         spin_unlock(&mm->page_table_lock);
810         return end;
811 }
812
813 /*
814  * Do a quick page-table lookup for a single page.
815  * mm->page_table_lock must be held.
816  */
817 static struct page *__follow_page(struct mm_struct *mm, unsigned long address,
818                         int read, int write, int accessed)
819 {
820         pgd_t *pgd;
821         pud_t *pud;
822         pmd_t *pmd;
823         pte_t *ptep, pte;
824         unsigned long pfn;
825         struct page *page;
826
827         page = follow_huge_addr(mm, address, write);
828         if (! IS_ERR(page))
829                 return page;
830
831         pgd = pgd_offset(mm, address);
832         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
833                 goto out;
834
835         pud = pud_offset(pgd, address);
836         if (pud_none(*pud) || unlikely(pud_bad(*pud)))
837                 goto out;
838         
839         pmd = pmd_offset(pud, address);
840         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
841                 goto out;
842         if (pmd_huge(*pmd))
843                 return follow_huge_pmd(mm, address, pmd, write);
844
845         ptep = pte_offset_map(pmd, address);
846         if (!ptep)
847                 goto out;
848
849         pte = *ptep;
850         pte_unmap(ptep);
851         if (pte_present(pte)) {
852                 if (write && !pte_write(pte))
853                         goto out;
854                 if (read && !pte_read(pte))
855                         goto out;
856                 pfn = pte_pfn(pte);
857                 if (pfn_valid(pfn)) {
858                         page = pfn_to_page(pfn);
859                         if (accessed) {
860                                 if (write && !pte_dirty(pte) &&!PageDirty(page))
861                                         set_page_dirty(page);
862                                 mark_page_accessed(page);
863                         }
864                         return page;
865                 }
866         }
867
868 out:
869         return NULL;
870 }
871
872 inline struct page *
873 follow_page(struct mm_struct *mm, unsigned long address, int write)
874 {
875         return __follow_page(mm, address, 0, write, 1);
876 }
877
878 /*
879  * check_user_page_readable() can be called from interrupt context by oprofile,
880  * so we need to avoid taking any non-irq-safe locks
881  */
882 int check_user_page_readable(struct mm_struct *mm, unsigned long address)
883 {
884         return __follow_page(mm, address, 1, 0, 0) != NULL;
885 }
886 EXPORT_SYMBOL(check_user_page_readable);
887
888 static inline int
889 untouched_anonymous_page(struct mm_struct* mm, struct vm_area_struct *vma,
890                          unsigned long address)
891 {
892         pgd_t *pgd;
893         pud_t *pud;
894         pmd_t *pmd;
895
896         /* Check if the vma is for an anonymous mapping. */
897         if (vma->vm_ops && vma->vm_ops->nopage)
898                 return 0;
899
900         /* Check if page directory entry exists. */
901         pgd = pgd_offset(mm, address);
902         if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
903                 return 1;
904
905         pud = pud_offset(pgd, address);
906         if (pud_none(*pud) || unlikely(pud_bad(*pud)))
907                 return 1;
908
909         /* Check if page middle directory entry exists. */
910         pmd = pmd_offset(pud, address);
911         if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
912                 return 1;
913
914         /* There is a pte slot for 'address' in 'mm'. */
915         return 0;
916 }
917
918 int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
919                 unsigned long start, int len, int write, int force,
920                 struct page **pages, struct vm_area_struct **vmas)
921 {
922         int i;
923         unsigned int flags;
924
925         /* 
926          * Require read or write permissions.
927          * If 'force' is set, we only require the "MAY" flags.
928          */
929         flags = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
930         flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
931         i = 0;
932
933         do {
934                 struct vm_area_struct * vma;
935
936                 vma = find_extend_vma(mm, start);
937                 if (!vma && in_gate_area(tsk, start)) {
938                         unsigned long pg = start & PAGE_MASK;
939                         struct vm_area_struct *gate_vma = get_gate_vma(tsk);
940                         pgd_t *pgd;
941                         pud_t *pud;
942                         pmd_t *pmd;
943                         pte_t *pte;
944                         if (write) /* user gate pages are read-only */
945                                 return i ? : -EFAULT;
946                         if (pg > TASK_SIZE)
947                                 pgd = pgd_offset_k(pg);
948                         else
949                                 pgd = pgd_offset_gate(mm, pg);
950                         BUG_ON(pgd_none(*pgd));
951                         pud = pud_offset(pgd, pg);
952                         BUG_ON(pud_none(*pud));
953                         pmd = pmd_offset(pud, pg);
954                         if (pmd_none(*pmd))
955                                 return i ? : -EFAULT;
956                         pte = pte_offset_map(pmd, pg);
957                         if (pte_none(*pte)) {
958                                 pte_unmap(pte);
959                                 return i ? : -EFAULT;
960                         }
961                         if (pages) {
962                                 pages[i] = pte_page(*pte);
963                                 get_page(pages[i]);
964                         }
965                         pte_unmap(pte);
966                         if (vmas)
967                                 vmas[i] = gate_vma;
968                         i++;
969                         start += PAGE_SIZE;
970                         len--;
971                         continue;
972                 }
973
974                 if (!vma || (vma->vm_flags & (VM_IO | VM_RESERVED))
975                                 || !(flags & vma->vm_flags))
976                         return i ? : -EFAULT;
977
978                 if (is_vm_hugetlb_page(vma)) {
979                         i = follow_hugetlb_page(mm, vma, pages, vmas,
980                                                 &start, &len, i);
981                         continue;
982                 }
983                 spin_lock(&mm->page_table_lock);
984                 do {
985                         int write_access = write;
986                         struct page *page;
987
988                         cond_resched_lock(&mm->page_table_lock);
989                         while (!(page = follow_page(mm, start, write_access))) {
990                                 int ret;
991
992                                 /*
993                                  * Shortcut for anonymous pages. We don't want
994                                  * to force the creation of page tables for
995                                  * insanely big anonymously mapped areas that
996                                  * nobody touched so far. This is important
997                                  * for doing a core dump for these mappings.
998                                  */
999                                 if (!write && untouched_anonymous_page(mm,vma,start)) {
1000                                         page = ZERO_PAGE(start);
1001                                         break;
1002                                 }
1003                                 spin_unlock(&mm->page_table_lock);
1004                                 ret = __handle_mm_fault(mm, vma, start, write_access);
1005
1006                                 /*
1007                                  * The VM_FAULT_WRITE bit tells us that do_wp_page has
1008                                  * broken COW when necessary, even if maybe_mkwrite
1009                                  * decided not to set pte_write. We can thus safely do
1010                                  * subsequent page lookups as if they were reads.
1011                                  */
1012                                 if (ret & VM_FAULT_WRITE)
1013                                         write_access = 0;
1014                                 
1015                                 switch (ret & ~VM_FAULT_WRITE) {
1016                                 case VM_FAULT_MINOR:
1017                                         tsk->min_flt++;
1018                                         break;
1019                                 case VM_FAULT_MAJOR:
1020                                         tsk->maj_flt++;
1021                                         break;
1022                                 case VM_FAULT_SIGBUS:
1023                                         return i ? i : -EFAULT;
1024                                 case VM_FAULT_OOM:
1025                                         return i ? i : -ENOMEM;
1026                                 default:
1027                                         BUG();
1028                                 }
1029                                 spin_lock(&mm->page_table_lock);
1030                         }
1031                         if (pages) {
1032                                 pages[i] = page;
1033                                 flush_dcache_page(page);
1034                                 page_cache_get(page);
1035                         }
1036                         if (vmas)
1037                                 vmas[i] = vma;
1038                         i++;
1039                         start += PAGE_SIZE;
1040                         len--;
1041                 } while (len && start < vma->vm_end);
1042                 spin_unlock(&mm->page_table_lock);
1043         } while (len);
1044         return i;
1045 }
1046 EXPORT_SYMBOL(get_user_pages);
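/*
 * Editor's sketch (not part of this file): a minimal illustration of how a
 * caller of this era might pin user pages with get_user_pages() and hand the
 * references back to its caller.  The function name pin_user_buffer() and the
 * variable nr_pinned are hypothetical; error handling is abbreviated.
 */
#if 0	/* example only, never compiled */
static int pin_user_buffer(unsigned long uaddr, int nr_pages,
                           struct page **pages)
{
        int nr_pinned;

        down_read(&current->mm->mmap_sem);
        nr_pinned = get_user_pages(current, current->mm, uaddr, nr_pages,
                                   1 /* write */, 0 /* force */, pages, NULL);
        up_read(&current->mm->mmap_sem);

        /* Callers drop each reference with page_cache_release() when done. */
        return nr_pinned;
}
#endif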
1047
1048 static int zeromap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1049                         unsigned long addr, unsigned long end, pgprot_t prot)
1050 {
1051         pte_t *pte;
1052
1053         pte = pte_alloc_map(mm, pmd, addr);
1054         if (!pte)
1055                 return -ENOMEM;
1056         do {
1057                 struct page *page = ZERO_PAGE(addr);
1058                 pte_t zero_pte = pte_wrprotect(mk_pte(page, prot));
1059                 page_cache_get(page);
1060                 page_add_file_rmap(page);
1061                 inc_mm_counter(mm, file_rss);
1062                 BUG_ON(!pte_none(*pte));
1063                 set_pte_at(mm, addr, pte, zero_pte);
1064         } while (pte++, addr += PAGE_SIZE, addr != end);
1065         pte_unmap(pte - 1);
1066         return 0;
1067 }
1068
1069 static inline int zeromap_pmd_range(struct mm_struct *mm, pud_t *pud,
1070                         unsigned long addr, unsigned long end, pgprot_t prot)
1071 {
1072         pmd_t *pmd;
1073         unsigned long next;
1074
1075         pmd = pmd_alloc(mm, pud, addr);
1076         if (!pmd)
1077                 return -ENOMEM;
1078         do {
1079                 next = pmd_addr_end(addr, end);
1080                 if (zeromap_pte_range(mm, pmd, addr, next, prot))
1081                         return -ENOMEM;
1082         } while (pmd++, addr = next, addr != end);
1083         return 0;
1084 }
1085
1086 static inline int zeromap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1087                         unsigned long addr, unsigned long end, pgprot_t prot)
1088 {
1089         pud_t *pud;
1090         unsigned long next;
1091
1092         pud = pud_alloc(mm, pgd, addr);
1093         if (!pud)
1094                 return -ENOMEM;
1095         do {
1096                 next = pud_addr_end(addr, end);
1097                 if (zeromap_pmd_range(mm, pud, addr, next, prot))
1098                         return -ENOMEM;
1099         } while (pud++, addr = next, addr != end);
1100         return 0;
1101 }
1102
1103 int zeromap_page_range(struct vm_area_struct *vma,
1104                         unsigned long addr, unsigned long size, pgprot_t prot)
1105 {
1106         pgd_t *pgd;
1107         unsigned long next;
1108         unsigned long end = addr + size;
1109         struct mm_struct *mm = vma->vm_mm;
1110         int err;
1111
1112         BUG_ON(addr >= end);
1113         pgd = pgd_offset(mm, addr);
1114         flush_cache_range(vma, addr, end);
1115         spin_lock(&mm->page_table_lock);
1116         do {
1117                 next = pgd_addr_end(addr, end);
1118                 err = zeromap_pud_range(mm, pgd, addr, next, prot);
1119                 if (err)
1120                         break;
1121         } while (pgd++, addr = next, addr != end);
1122         spin_unlock(&mm->page_table_lock);
1123         return err;
1124 }
1125
1126 /*
1127  * Maps a range of physical memory into the requested pages. The old
1128  * mappings are removed. Any references to nonexistent pages result
1129  * in null mappings (currently treated as "copy-on-access").
1130  */
1131 static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd,
1132                         unsigned long addr, unsigned long end,
1133                         unsigned long pfn, pgprot_t prot)
1134 {
1135         pte_t *pte;
1136
1137         pte = pte_alloc_map(mm, pmd, addr);
1138         if (!pte)
1139                 return -ENOMEM;
1140         do {
1141                 BUG_ON(!pte_none(*pte));
1142                 set_pte_at(mm, addr, pte, pfn_pte(pfn, prot));
1143                 pfn++;
1144         } while (pte++, addr += PAGE_SIZE, addr != end);
1145         pte_unmap(pte - 1);
1146         return 0;
1147 }
1148
1149 static inline int remap_pmd_range(struct mm_struct *mm, pud_t *pud,
1150                         unsigned long addr, unsigned long end,
1151                         unsigned long pfn, pgprot_t prot)
1152 {
1153         pmd_t *pmd;
1154         unsigned long next;
1155
1156         pfn -= addr >> PAGE_SHIFT;
1157         pmd = pmd_alloc(mm, pud, addr);
1158         if (!pmd)
1159                 return -ENOMEM;
1160         do {
1161                 next = pmd_addr_end(addr, end);
1162                 if (remap_pte_range(mm, pmd, addr, next,
1163                                 pfn + (addr >> PAGE_SHIFT), prot))
1164                         return -ENOMEM;
1165         } while (pmd++, addr = next, addr != end);
1166         return 0;
1167 }
1168
1169 static inline int remap_pud_range(struct mm_struct *mm, pgd_t *pgd,
1170                         unsigned long addr, unsigned long end,
1171                         unsigned long pfn, pgprot_t prot)
1172 {
1173         pud_t *pud;
1174         unsigned long next;
1175
1176         pfn -= addr >> PAGE_SHIFT;
1177         pud = pud_alloc(mm, pgd, addr);
1178         if (!pud)
1179                 return -ENOMEM;
1180         do {
1181                 next = pud_addr_end(addr, end);
1182                 if (remap_pmd_range(mm, pud, addr, next,
1183                                 pfn + (addr >> PAGE_SHIFT), prot))
1184                         return -ENOMEM;
1185         } while (pud++, addr = next, addr != end);
1186         return 0;
1187 }
1188
1189 /*  Note: this is only safe if the mm semaphore is held when called. */
1190 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1191                     unsigned long pfn, unsigned long size, pgprot_t prot)
1192 {
1193         pgd_t *pgd;
1194         unsigned long next;
1195         unsigned long end = addr + PAGE_ALIGN(size);
1196         struct mm_struct *mm = vma->vm_mm;
1197         int err;
1198
1199         /*
1200          * Physically remapped pages are special. Tell the
1201          * rest of the world about it:
1202          *   VM_IO tells people not to look at these pages
1203          *      (accesses can have side effects).
1204          *   VM_RESERVED tells the core MM not to "manage" these pages
1205          *      (e.g. refcount, mapcount, try to swap them out).
1206          */
1207         vma->vm_flags |= VM_IO | VM_RESERVED;
1208
1209         BUG_ON(addr >= end);
1210         pfn -= addr >> PAGE_SHIFT;
1211         pgd = pgd_offset(mm, addr);
1212         flush_cache_range(vma, addr, end);
1213         spin_lock(&mm->page_table_lock);
1214         do {
1215                 next = pgd_addr_end(addr, end);
1216                 err = remap_pud_range(mm, pgd, addr, next,
1217                                 pfn + (addr >> PAGE_SHIFT), prot);
1218                 if (err)
1219                         break;
1220         } while (pgd++, addr = next, addr != end);
1221         spin_unlock(&mm->page_table_lock);
1222         return err;
1223 }
1224 EXPORT_SYMBOL(remap_pfn_range);
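/*
 * Editor's sketch (not part of this file): the usual way a driver of this era
 * would call remap_pfn_range() from its ->mmap handler, mapping the pfn that
 * userspace encoded in the mmap offset.  The name mydrv_mmap() is
 * hypothetical; a real driver would validate vm_pgoff and the size first.
 */
#if 0	/* example only, never compiled */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        /* vm_pgoff carries the physical page frame number to map. */
        if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
                            size, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
#endif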
1225
1226 /*
1227  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
1228  * servicing faults for write access.  In the normal case, we always want
1229  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
1230  * that do not have writing enabled, when used by access_process_vm.
1231  */
1232 static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
1233 {
1234         if (likely(vma->vm_flags & VM_WRITE))
1235                 pte = pte_mkwrite(pte);
1236         return pte;
1237 }
1238
1239 /*
1240  * This routine handles present pages, when users try to write
1241  * to a shared page. It is done by copying the page to a new address
1242  * and decrementing the shared-page counter for the old page.
1243  *
1244  * Note that this routine assumes that the protection checks have been
1245  * done by the caller (the low-level page fault routine in most cases).
1246  * Thus we can safely just mark it writable once we've done any necessary
1247  * COW.
1248  *
1249  * We also mark the page dirty at this point even though the page will
1250  * change only once the write actually happens. This avoids a few races,
1251  * and potentially makes it more efficient.
1252  *
1253  * We hold the mm semaphore and the page_table_lock on entry and exit
1254  * with the page_table_lock released.
1255  */
1256 static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1257                 unsigned long address, pte_t *page_table, pmd_t *pmd,
1258                 pte_t orig_pte)
1259 {
1260         struct page *old_page, *new_page;
1261         unsigned long pfn = pte_pfn(orig_pte);
1262         pte_t entry;
1263         int ret = VM_FAULT_MINOR;
1264
1265         BUG_ON(vma->vm_flags & VM_RESERVED);
1266
1267         if (unlikely(!pfn_valid(pfn))) {
1268                 /*
1269                  * Page table corrupted: show pte and kill process.
1270                  */
1271                 print_bad_pte(vma, orig_pte, address);
1272                 ret = VM_FAULT_OOM;
1273                 goto unlock;
1274         }
1275         old_page = pfn_to_page(pfn);
1276
1277         if (PageAnon(old_page) && !TestSetPageLocked(old_page)) {
1278                 int reuse = can_share_swap_page(old_page);
1279                 unlock_page(old_page);
1280                 if (reuse) {
1281                         flush_cache_page(vma, address, pfn);
1282                         entry = pte_mkyoung(orig_pte);
1283                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1284                         ptep_set_access_flags(vma, address, page_table, entry, 1);
1285                         update_mmu_cache(vma, address, entry);
1286                         lazy_mmu_prot_update(entry);
1287                         ret |= VM_FAULT_WRITE;
1288                         goto unlock;
1289                 }
1290         }
1291
1292         /*
1293          * Ok, we need to copy. Oh, well..
1294          */
1295         page_cache_get(old_page);
1296         pte_unmap(page_table);
1297         spin_unlock(&mm->page_table_lock);
1298
1299         if (unlikely(anon_vma_prepare(vma)))
1300                 goto oom;
1301         if (old_page == ZERO_PAGE(address)) {
1302                 new_page = alloc_zeroed_user_highpage(vma, address);
1303                 if (!new_page)
1304                         goto oom;
1305         } else {
1306                 new_page = alloc_page_vma(GFP_HIGHUSER, vma, address);
1307                 if (!new_page)
1308                         goto oom;
1309                 copy_user_highpage(new_page, old_page, address);
1310         }
1311
1312         /*
1313          * Re-check the pte - we dropped the lock
1314          */
1315         spin_lock(&mm->page_table_lock);
1316         page_table = pte_offset_map(pmd, address);
1317         if (likely(pte_same(*page_table, orig_pte))) {
1318                 page_remove_rmap(old_page);
1319                 if (!PageAnon(old_page)) {
1320                         inc_mm_counter(mm, anon_rss);
1321                         dec_mm_counter(mm, file_rss);
1322                 }
1323                 flush_cache_page(vma, address, pfn);
1324                 entry = mk_pte(new_page, vma->vm_page_prot);
1325                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1326                 ptep_establish(vma, address, page_table, entry);
1327                 update_mmu_cache(vma, address, entry);
1328                 lazy_mmu_prot_update(entry);
1329
1330                 lru_cache_add_active(new_page);
1331                 page_add_anon_rmap(new_page, vma, address);
1332
1333                 /* Free the old page.. */
1334                 new_page = old_page;
1335                 ret |= VM_FAULT_WRITE;
1336         }
1337         page_cache_release(new_page);
1338         page_cache_release(old_page);
1339 unlock:
1340         pte_unmap(page_table);
1341         spin_unlock(&mm->page_table_lock);
1342         return ret;
1343 oom:
1344         page_cache_release(old_page);
1345         return VM_FAULT_OOM;
1346 }
1347
1348 /*
1349  * Helper functions for unmap_mapping_range().
1350  *
1351  * __ Notes on dropping i_mmap_lock to reduce latency while unmapping __
1352  *
1353  * We have to restart searching the prio_tree whenever we drop the lock,
1354  * since the iterator is only valid while the lock is held, and anyway
1355  * a later vma might be split and reinserted earlier while lock dropped.
1356  *
1357  * The list of nonlinear vmas could be handled more efficiently, using
1358  * a placeholder, but handle it in the same way until a need is shown.
1359  * It is important to search the prio_tree before nonlinear list: a vma
1360  * may become nonlinear and be shifted from prio_tree to nonlinear list
1361  * while the lock is dropped; but never shifted from list to prio_tree.
1362  *
1363  * In order to make forward progress despite restarting the search,
1364  * vm_truncate_count is used to mark a vma as now dealt with, so we can
1365  * quickly skip it next time around.  Since the prio_tree search only
1366  * shows us those vmas affected by unmapping the range in question, we
1367  * can't efficiently keep all vmas in step with mapping->truncate_count:
1368  * so instead reset them all whenever it wraps back to 0 (then go to 1).
1369  * mapping->truncate_count and vma->vm_truncate_count are protected by
1370  * i_mmap_lock.
1371  *
1372  * In order to make forward progress despite repeatedly restarting some
1373  * large vma, note the restart_addr from unmap_vmas when it breaks out:
1374  * and restart from that address when we reach that vma again.  It might
1375  * have been split or merged, shrunk or extended, but never shifted: so
1376  * restart_addr remains valid so long as it remains in the vma's range.
1377  * unmap_mapping_range forces truncate_count to leap over page-aligned
1378  * values so we can save vma's restart_addr in its truncate_count field.
1379  */
1380 #define is_restart_addr(truncate_count) (!((truncate_count) & ~PAGE_MASK))
1381
1382 static void reset_vma_truncate_counts(struct address_space *mapping)
1383 {
1384         struct vm_area_struct *vma;
1385         struct prio_tree_iter iter;
1386
1387         vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, 0, ULONG_MAX)
1388                 vma->vm_truncate_count = 0;
1389         list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
1390                 vma->vm_truncate_count = 0;
1391 }
1392
1393 static int unmap_mapping_range_vma(struct vm_area_struct *vma,
1394                 unsigned long start_addr, unsigned long end_addr,
1395                 struct zap_details *details)
1396 {
1397         unsigned long restart_addr;
1398         int need_break;
1399
1400 again:
1401         restart_addr = vma->vm_truncate_count;
1402         if (is_restart_addr(restart_addr) && start_addr < restart_addr) {
1403                 start_addr = restart_addr;
1404                 if (start_addr >= end_addr) {
1405                         /* Top of vma has been split off since last time */
1406                         vma->vm_truncate_count = details->truncate_count;
1407                         return 0;
1408                 }
1409         }
1410
1411         restart_addr = zap_page_range(vma, start_addr,
1412                                         end_addr - start_addr, details);
1413
1414         /*
1415          * We cannot rely on the break test in unmap_vmas:
1416          * on the one hand, we don't want to restart our loop
1417          * just because that broke out for the page_table_lock;
1418          * on the other hand, it performs no test at all when the vma is small.
1419          */
1420         need_break = need_resched() ||
1421                         need_lockbreak(details->i_mmap_lock);
1422
1423         if (restart_addr >= end_addr) {
1424                 /* We have now completed this vma: mark it so */
1425                 vma->vm_truncate_count = details->truncate_count;
1426                 if (!need_break)
1427                         return 0;
1428         } else {
1429                 /* Note restart_addr in vma's truncate_count field */
1430                 vma->vm_truncate_count = restart_addr;
1431                 if (!need_break)
1432                         goto again;
1433         }
1434
1435         spin_unlock(details->i_mmap_lock);
1436         cond_resched();
1437         spin_lock(details->i_mmap_lock);
1438         return -EINTR;
1439 }
1440
1441 static inline void unmap_mapping_range_tree(struct prio_tree_root *root,
1442                                             struct zap_details *details)
1443 {
1444         struct vm_area_struct *vma;
1445         struct prio_tree_iter iter;
1446         pgoff_t vba, vea, zba, zea;
1447
1448 restart:
1449         vma_prio_tree_foreach(vma, &iter, root,
1450                         details->first_index, details->last_index) {
1451                 /* Skip quickly over those we have already dealt with */
1452                 if (vma->vm_truncate_count == details->truncate_count)
1453                         continue;
1454
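                /*
                 * vba/vea span the vma in units of file pages; zba/zea clamp
                 * the requested zap range to that span before it is converted
                 * back to virtual addresses for unmap_mapping_range_vma().
                 */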
1455                 vba = vma->vm_pgoff;
1456                 vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
1457                 /* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
1458                 zba = details->first_index;
1459                 if (zba < vba)
1460                         zba = vba;
1461                 zea = details->last_index;
1462                 if (zea > vea)
1463                         zea = vea;
1464
1465                 if (unmap_mapping_range_vma(vma,
1466                         ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
1467                         ((zea - vba + 1) << PAGE_SHIFT) + vma->vm_start,
1468                                 details) < 0)
1469                         goto restart;
1470         }
1471 }
1472
1473 static inline void unmap_mapping_range_list(struct list_head *head,
1474                                             struct zap_details *details)
1475 {
1476         struct vm_area_struct *vma;
1477
1478         /*
1479          * In nonlinear VMAs there is no correspondence between virtual address
1480          * offset and file offset.  So we must perform an exhaustive search
1481          * across *all* the pages in each nonlinear VMA, not just the pages
1482          * whose virtual address lies outside the file truncation point.
1483          */
1484 restart:
1485         list_for_each_entry(vma, head, shared.vm_set.list) {
1486                 /* Skip quickly over those we have already dealt with */
1487                 if (vma->vm_truncate_count == details->truncate_count)
1488                         continue;
1489                 details->nonlinear_vma = vma;
1490                 if (unmap_mapping_range_vma(vma, vma->vm_start,
1491                                         vma->vm_end, details) < 0)
1492                         goto restart;
1493         }
1494 }
1495
1496 /**
1497  * unmap_mapping_range - unmap the portion of all mmaps
1498  * in the specified address_space corresponding to the specified
1499  * page range in the underlying file.
1500  * @mapping: the address space containing mmaps to be unmapped.
1501  * @holebegin: byte in first page to unmap, relative to the start of
1502  * the underlying file.  This will be rounded down to a PAGE_SIZE
1503  * boundary.  Note that this is different from vmtruncate(), which
1504  * must keep the partial page.  In contrast, we must get rid of
1505  * partial pages.
1506  * @holelen: size of prospective hole in bytes.  This will be rounded
1507  * up to a PAGE_SIZE boundary.  A holelen of zero truncates to the
1508  * end of the file.
1509  * @even_cows: 1 when truncating a file, unmap even private COWed pages;
1510  * but 0 when invalidating pagecache, don't throw away private data.
1511  */
1512 void unmap_mapping_range(struct address_space *mapping,
1513                 loff_t const holebegin, loff_t const holelen, int even_cows)
1514 {
1515         struct zap_details details;
1516         pgoff_t hba = holebegin >> PAGE_SHIFT;
1517         pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1518
1519         /* Check for overflow. */
1520         if (sizeof(holelen) > sizeof(hlen)) {
1521                 long long holeend =
1522                         (holebegin + holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
1523                 if (holeend & ~(long long)ULONG_MAX)
1524                         hlen = ULONG_MAX - hba + 1;
1525         }
1526
1527         details.check_mapping = even_cows ? NULL : mapping;
1528         details.nonlinear_vma = NULL;
1529         details.first_index = hba;
1530         details.last_index = hba + hlen - 1;
1531         if (details.last_index < details.first_index)
1532                 details.last_index = ULONG_MAX;
1533         details.i_mmap_lock = &mapping->i_mmap_lock;
1534
1535         spin_lock(&mapping->i_mmap_lock);
1536
1537         /* serialize i_size write against truncate_count write */
1538         smp_wmb();
1539         /* Protect against page faults, and endless unmapping loops */
1540         mapping->truncate_count++;
1541         /*
1542          * For archs where spin_lock has inclusive semantics, such as ia64,
1543          * this smp_mb() prevents the page table contents from being read
1544          * before the truncate_count increment is visible to
1545          * other cpus.
1546          */
1547         smp_mb();
1548         if (unlikely(is_restart_addr(mapping->truncate_count))) {
1549                 if (mapping->truncate_count == 0)
1550                         reset_vma_truncate_counts(mapping);
1551                 mapping->truncate_count++;
1552         }
1553         details.truncate_count = mapping->truncate_count;
1554
1555         if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
1556                 unmap_mapping_range_tree(&mapping->i_mmap, &details);
1557         if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
1558                 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
1559         spin_unlock(&mapping->i_mmap_lock);
1560 }
1561 EXPORT_SYMBOL(unmap_mapping_range);
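
/*
 * Usage sketch (illustrative, not taken from this file): a filesystem
 * truncating to 'offset' wants even private COWed copies gone but must keep
 * the page containing the new EOF mapped, so it biases holebegin up by
 * PAGE_SIZE - 1, exactly as vmtruncate() does below; invalidating pagecache
 * instead passes even_cows == 0 so private data survives:
 *
 *	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
 *	unmap_mapping_range(mapping, holebegin, holelen, 0);
 */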
1562
1563 /*
1564  * Handle all mappings that got truncated by a "truncate()"
1565  * system call.
1566  *
1567  * NOTE! We have to be ready to update the memory sharing
1568  * between the file and the memory map for a potential last
1569  * incomplete page.  Ugly, but necessary.
1570  */
1571 int vmtruncate(struct inode * inode, loff_t offset)
1572 {
1573         struct address_space *mapping = inode->i_mapping;
1574         unsigned long limit;
1575
1576         if (inode->i_size < offset)
1577                 goto do_expand;
1578         /*
1579          * truncation of in-use swapfiles is disallowed - it would cause
1580          * subsequent swapout to scribble on the now-freed blocks.
1581          */
1582         if (IS_SWAPFILE(inode))
1583                 goto out_busy;
1584         i_size_write(inode, offset);
1585         unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
1586         truncate_inode_pages(mapping, offset);
1587         goto out_truncate;
1588
1589 do_expand:
1590         limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
1591         if (limit != RLIM_INFINITY && offset > limit)
1592                 goto out_sig;
1593         if (offset > inode->i_sb->s_maxbytes)
1594                 goto out_big;
1595         i_size_write(inode, offset);
1596
1597 out_truncate:
1598         if (inode->i_op && inode->i_op->truncate)
1599                 inode->i_op->truncate(inode);
1600         return 0;
1601 out_sig:
1602         send_sig(SIGXFSZ, current, 0);
1603 out_big:
1604         return -EFBIG;
1605 out_busy:
1606         return -ETXTBSY;
1607 }
1608
1609 EXPORT_SYMBOL(vmtruncate);
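
/*
 * Call-site sketch (illustrative): the usual way here is the generic setattr
 * handling when a size change is requested, roughly:
 *
 *	if (attr->ia_valid & ATTR_SIZE)
 *		error = vmtruncate(inode, attr->ia_size);
 *
 * with the filesystem's ->truncate method then invoked from out_truncate
 * above to free the on-disk blocks.
 */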
1610
1611 /* 
1612  * Primitive swap readahead code. We simply read an aligned block of
1613  * (1 << page_cluster) entries in the swap area. This method is chosen
1614  * because it doesn't cost us any seek time.  We also make sure to queue
1615  * the 'original' request together with the readahead ones...  
1616  *
1617  * This has been extended to use the NUMA policies from the mm triggering
1618  * the readahead.
1619  *
1620  * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
1621  */
1622 void swapin_readahead(swp_entry_t entry, unsigned long addr, struct vm_area_struct *vma)
1623 {
1624 #ifdef CONFIG_NUMA
1625         struct vm_area_struct *next_vma = vma ? vma->vm_next : NULL;
1626 #endif
1627         int i, num;
1628         struct page *new_page;
1629         unsigned long offset;
1630
1631         /*
1632          * Get the number of swap entries to read ahead, starting at 'offset'.
1633          */
1634         num = valid_swaphandles(entry, &offset);
1635         for (i = 0; i < num; offset++, i++) {
1636                 /* Ok, do the async read-ahead now */
1637                 new_page = read_swap_cache_async(swp_entry(swp_type(entry),
1638                                                            offset), vma, addr);
1639                 if (!new_page)
1640                         break;
1641                 page_cache_release(new_page);
1642 #ifdef CONFIG_NUMA
1643                 /*
1644                  * Find the next applicable VMA for the NUMA policy.
1645                  */
1646                 addr += PAGE_SIZE;
1647                 if (addr == 0)
1648                         vma = NULL;
1649                 if (vma) {
1650                         if (addr >= vma->vm_end) {
1651                                 vma = next_vma;
1652                                 next_vma = vma ? vma->vm_next : NULL;
1653                         }
1654                         if (vma && addr < vma->vm_start)
1655                                 vma = NULL;
1656                 } else {
1657                         if (next_vma && addr >= next_vma->vm_start) {
1658                                 vma = next_vma;
1659                                 next_vma = vma->vm_next;
1660                         }
1661                 }
1662 #endif
1663         }
1664         lru_add_drain();        /* Push any new pages onto the LRU now */
1665 }
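
/*
 * Worked example (illustrative): with page_cluster == 3 the readahead window
 * is 1 << 3 == 8 entries, so a fault on swap offset 13 asks
 * valid_swaphandles() for the aligned block covering offsets 8..15, cut
 * short at the first free or bad entry and at the end of the swap area.
 */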
1666
1667 /*
1668  * We hold the mm semaphore and the page_table_lock on entry and
1669  * should release the pagetable lock on exit..
1670  */
1671 static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
1672                 unsigned long address, pte_t *page_table, pmd_t *pmd,
1673                 int write_access, pte_t orig_pte)
1674 {
1675         struct page *page;
1676         swp_entry_t entry;
1677         pte_t pte;
1678         int ret = VM_FAULT_MINOR;
1679
1680         pte_unmap(page_table);
1681         spin_unlock(&mm->page_table_lock);
1682
1683         entry = pte_to_swp_entry(orig_pte);
1684         page = lookup_swap_cache(entry);
1685         if (!page) {
1686                 swapin_readahead(entry, address, vma);
1687                 page = read_swap_cache_async(entry, vma, address);
1688                 if (!page) {
1689                         /*
1690                          * Back out if somebody else faulted in this pte while
1691                          * we released the page table lock.
1692                          */
1693                         spin_lock(&mm->page_table_lock);
1694                         page_table = pte_offset_map(pmd, address);
1695                         if (likely(pte_same(*page_table, orig_pte)))
1696                                 ret = VM_FAULT_OOM;
1697                         goto unlock;
1698                 }
1699
1700                 /* Had to read the page from swap area: Major fault */
1701                 ret = VM_FAULT_MAJOR;
1702                 inc_page_state(pgmajfault);
1703                 grab_swap_token();
1704         }
1705
1706         mark_page_accessed(page);
1707         lock_page(page);
1708
1709         /*
1710          * Back out if somebody else faulted in this pte while we
1711          * released the page table lock.
1712          */
1713         spin_lock(&mm->page_table_lock);
1714         page_table = pte_offset_map(pmd, address);
1715         if (unlikely(!pte_same(*page_table, orig_pte)))
1716                 goto out_nomap;
1717
1718         if (unlikely(!PageUptodate(page))) {
1719                 ret = VM_FAULT_SIGBUS;
1720                 goto out_nomap;
1721         }
1722
1723         /* The page isn't present yet, go ahead with the fault. */
1724
1725         inc_mm_counter(mm, anon_rss);
1726         pte = mk_pte(page, vma->vm_page_prot);
1727         if (write_access && can_share_swap_page(page)) {
1728                 pte = maybe_mkwrite(pte_mkdirty(pte), vma);
1729                 write_access = 0;
1730         }
1731
1732         flush_icache_page(vma, page);
1733         set_pte_at(mm, address, page_table, pte);
1734         page_add_anon_rmap(page, vma, address);
1735
1736         swap_free(entry);
1737         if (vm_swap_full())
1738                 remove_exclusive_swap_page(page);
1739         unlock_page(page);
1740
1741         if (write_access) {
1742                 if (do_wp_page(mm, vma, address,
1743                                 page_table, pmd, pte) == VM_FAULT_OOM)
1744                         ret = VM_FAULT_OOM;
1745                 goto out;
1746         }
1747
1748         /* No need to invalidate - it was non-present before */
1749         update_mmu_cache(vma, address, pte);
1750         lazy_mmu_prot_update(pte);
1751 unlock:
1752         pte_unmap(page_table);
1753         spin_unlock(&mm->page_table_lock);
1754 out:
1755         return ret;
1756 out_nomap:
1757         pte_unmap(page_table);
1758         spin_unlock(&mm->page_table_lock);
1759         unlock_page(page);
1760         page_cache_release(page);
1761         return ret;
1762 }
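
/*
 * The pattern above - drop page_table_lock for the slow work (swap I/O),
 * retake it, then revalidate with pte_same(*page_table, orig_pte) before
 * touching the pte - is what makes do_swap_page() safe against a concurrent
 * fault on the same address.  do_no_page() below plays the same game, using
 * mapping->truncate_count and a final pte_none() check as its revalidation.
 */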
1763
1764 /*
1765  * We are called with the MM semaphore and page_table_lock
1766  * spinlock held to protect against concurrent faults in
1767  * multithreaded programs. 
1768  */
1769 static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
1770                 unsigned long address, pte_t *page_table, pmd_t *pmd,
1771                 int write_access)
1772 {
1773         struct page *page = ZERO_PAGE(address);
1774         pte_t entry;
1775
1776         /* Mapping of ZERO_PAGE - vm_page_prot is readonly */
1777         entry = mk_pte(page, vma->vm_page_prot);
1778
1779         if (write_access) {
1780                 /* Allocate our own private page. */
1781                 pte_unmap(page_table);
1782                 spin_unlock(&mm->page_table_lock);
1783
1784                 if (unlikely(anon_vma_prepare(vma)))
1785                         goto oom;
1786                 page = alloc_zeroed_user_highpage(vma, address);
1787                 if (!page)
1788                         goto oom;
1789
1790                 spin_lock(&mm->page_table_lock);
1791                 page_table = pte_offset_map(pmd, address);
1792
1793                 if (!pte_none(*page_table)) {
1794                         page_cache_release(page);
1795                         goto unlock;
1796                 }
1797                 inc_mm_counter(mm, anon_rss);
1798                 entry = mk_pte(page, vma->vm_page_prot);
1799                 entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1800                 lru_cache_add_active(page);
1801                 SetPageReferenced(page);
1802                 page_add_anon_rmap(page, vma, address);
1803         } else {
1804                 inc_mm_counter(mm, file_rss);
1805                 page_add_file_rmap(page);
1806                 page_cache_get(page);
1807         }
1808
1809         set_pte_at(mm, address, page_table, entry);
1810
1811         /* No need to invalidate - it was non-present before */
1812         update_mmu_cache(vma, address, entry);
1813         lazy_mmu_prot_update(entry);
1814 unlock:
1815         pte_unmap(page_table);
1816         spin_unlock(&mm->page_table_lock);
1817         return VM_FAULT_MINOR;
1818 oom:
1819         return VM_FAULT_OOM;
1820 }
1821
1822 /*
1823  * do_no_page() tries to create a new page mapping. It aggressively
1824  * tries to share with existing pages, but makes a separate copy if
1825  * the "write_access" parameter is true in order to avoid the next
1826  * page fault.
1827  *
1828  * As this is called only for pages that do not currently exist, we
1829  * do not need to flush old virtual caches or the TLB.
1830  *
1831  * This is called with the MM semaphore held and the page table
1832  * spinlock held. Exit with the spinlock released.
1833  */
1834 static int do_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
1835                 unsigned long address, pte_t *page_table, pmd_t *pmd,
1836                 int write_access)
1837 {
1838         struct page *new_page;
1839         struct address_space *mapping = NULL;
1840         pte_t entry;
1841         unsigned int sequence = 0;
1842         int ret = VM_FAULT_MINOR;
1843         int anon = 0;
1844
1845         pte_unmap(page_table);
1846         spin_unlock(&mm->page_table_lock);
1847
1848         if (vma->vm_file) {
1849                 mapping = vma->vm_file->f_mapping;
1850                 sequence = mapping->truncate_count;
1851                 smp_rmb(); /* serializes i_size against truncate_count */
1852         }
1853 retry:
1854         new_page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
1855         /*
1856          * No smp_rmb is needed here as long as there's a full
1857          * spin_lock/unlock sequence inside the ->nopage callback
1858          * (for the pagecache lookup) that acts as an implicit
1859          * smp_mb() and prevents the i_size read to happen
1860          * after the next truncate_count read.
1861          */
1862
1863         /* no page was available -- either SIGBUS or OOM */
1864         if (new_page == NOPAGE_SIGBUS)
1865                 return VM_FAULT_SIGBUS;
1866         if (new_page == NOPAGE_OOM)
1867                 return VM_FAULT_OOM;
1868
1869         /*
1870          * Should we do an early C-O-W break?
1871          */
1872         if (write_access && !(vma->vm_flags & VM_SHARED)) {
1873                 struct page *page;
1874
1875                 if (unlikely(anon_vma_prepare(vma)))
1876                         goto oom;
1877                 page = alloc_page_vma(GFP_HIGHUSER, vma, address);
1878                 if (!page)
1879                         goto oom;
1880                 copy_user_highpage(page, new_page, address);
1881                 page_cache_release(new_page);
1882                 new_page = page;
1883                 anon = 1;
1884         }
1885
1886         spin_lock(&mm->page_table_lock);
1887         /*
1888          * For a file-backed vma, someone could have truncated or otherwise
1889          * invalidated this page.  If unmap_mapping_range got called,
1890          * retry getting the page.
1891          */
1892         if (mapping && unlikely(sequence != mapping->truncate_count)) {
1893                 spin_unlock(&mm->page_table_lock);
1894                 page_cache_release(new_page);
1895                 cond_resched();
1896                 sequence = mapping->truncate_count;
1897                 smp_rmb();
1898                 goto retry;
1899         }
1900         page_table = pte_offset_map(pmd, address);
1901
1902         /*
1903          * This silly early PAGE_DIRTY setting removes a race
1904          * due to the bad i386 page protection. But it's valid
1905          * for other architectures too.
1906          *
1907          * Note that if write_access is true, we either now have
1908          * an exclusive copy of the page, or this is a shared mapping,
1909          * so we can make it writable and dirty to avoid having to
1910          * handle that later.
1911          */
1912         /* Only go through if we didn't race with anybody else... */
1913         if (pte_none(*page_table)) {
1914                 flush_icache_page(vma, new_page);
1915                 entry = mk_pte(new_page, vma->vm_page_prot);
1916                 if (write_access)
1917                         entry = maybe_mkwrite(pte_mkdirty(entry), vma);
1918                 set_pte_at(mm, address, page_table, entry);
1919                 if (anon) {
1920                         inc_mm_counter(mm, anon_rss);
1921                         lru_cache_add_active(new_page);
1922                         page_add_anon_rmap(new_page, vma, address);
1923                 } else if (!(vma->vm_flags & VM_RESERVED)) {
1924                         inc_mm_counter(mm, file_rss);
1925                         page_add_file_rmap(new_page);
1926                 }
1927         } else {
1928                 /* One of our sibling threads was faster, back out. */
1929                 page_cache_release(new_page);
1930                 goto unlock;
1931         }
1932
1933         /* no need to invalidate: a not-present page shouldn't be cached */
1934         update_mmu_cache(vma, address, entry);
1935         lazy_mmu_prot_update(entry);
1936 unlock:
1937         pte_unmap(page_table);
1938         spin_unlock(&mm->page_table_lock);
1939         return ret;
1940 oom:
1941         page_cache_release(new_page);
1942         return VM_FAULT_OOM;
1943 }
1944
1945 /*
1946  * Fault of a previously existing named mapping. Repopulate the pte
1947  * from the encoded file_pte if possible. This enables swappable
1948  * nonlinear vmas.
1949  */
1950 static int do_file_page(struct mm_struct *mm, struct vm_area_struct *vma,
1951                 unsigned long address, pte_t *page_table, pmd_t *pmd,
1952                 int write_access, pte_t orig_pte)
1953 {
1954         pgoff_t pgoff;
1955         int err;
1956
1957         pte_unmap(page_table);
1958         spin_unlock(&mm->page_table_lock);
1959
1960         if (unlikely(!(vma->vm_flags & VM_NONLINEAR))) {
1961                 /*
1962                  * Page table corrupted: show pte and kill process.
1963                  */
1964                 print_bad_pte(vma, orig_pte, address);
1965                 return VM_FAULT_OOM;
1966         }
1967         /* We can then assume vma->vm_ops && vma->vm_ops->populate */
1968
1969         pgoff = pte_to_pgoff(orig_pte);
1970         err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
1971                                         vma->vm_page_prot, pgoff, 0);
1972         if (err == -ENOMEM)
1973                 return VM_FAULT_OOM;
1974         if (err)
1975                 return VM_FAULT_SIGBUS;
1976         return VM_FAULT_MAJOR;
1977 }
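
/*
 * Background note: the encoded file ptes handled above are installed by the
 * remap_file_pages() system call, which rearranges which file page backs
 * each page of an existing shared mapping, e.g. (illustrative):
 *
 *	remap_file_pages(start + n * page_size, page_size, 0, pgoff, 0);
 *
 * pte_to_pgoff() recovers that per-page file offset, so ->populate can
 * reinstate the right page after it has been unmapped or reclaimed.
 */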
1978
1979 /*
1980  * These routines also need to handle stuff like marking pages dirty
1981  * and/or accessed for architectures that don't do it in hardware (most
1982  * RISC architectures).  The early dirtying is also good on the i386.
1983  *
1984  * There is also a hook called "update_mmu_cache()" that architectures
1985  * with external mmu caches can use to update those (ie the Sparc or
1986  * PowerPC hashed page tables that act as extended TLBs).
1987  *
1988  * Note the "page_table_lock". It is to protect against kswapd removing
1989  * pages from under us. Note that kswapd only ever _removes_ pages, never
1990  * adds them. As such, once we have noticed that the page is not present,
1991  * we can drop the lock early.
1992  *
1993  * The adding of pages is protected by the MM semaphore (which we hold),
1994  * so we don't need to worry about a page suddenly being added into
1995  * our VM.
1996  *
1997  * We enter with the pagetable spinlock held, we are supposed to
1998  * release it when done.
1999  */
2000 static inline int handle_pte_fault(struct mm_struct *mm,
2001                 struct vm_area_struct *vma, unsigned long address,
2002                 pte_t *pte, pmd_t *pmd, int write_access)
2003 {
2004         pte_t entry;
2005
2006         entry = *pte;
2007         if (!pte_present(entry)) {
2008                 if (pte_none(entry)) {
2009                         if (!vma->vm_ops || !vma->vm_ops->nopage)
2010                                 return do_anonymous_page(mm, vma, address,
2011                                         pte, pmd, write_access);
2012                         return do_no_page(mm, vma, address,
2013                                         pte, pmd, write_access);
2014                 }
2015                 if (pte_file(entry))
2016                         return do_file_page(mm, vma, address,
2017                                         pte, pmd, write_access, entry);
2018                 return do_swap_page(mm, vma, address,
2019                                         pte, pmd, write_access, entry);
2020         }
2021
2022         if (write_access) {
2023                 if (!pte_write(entry))
2024                         return do_wp_page(mm, vma, address, pte, pmd, entry);
2025                 entry = pte_mkdirty(entry);
2026         }
2027         entry = pte_mkyoung(entry);
2028         ptep_set_access_flags(vma, address, pte, entry, write_access);
2029         update_mmu_cache(vma, address, entry);
2030         lazy_mmu_prot_update(entry);
2031         pte_unmap(pte);
2032         spin_unlock(&mm->page_table_lock);
2033         return VM_FAULT_MINOR;
2034 }
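
/*
 * Dispatch summary for handle_pte_fault() above, as read from the code:
 *
 *	!pte_present, pte_none, no ->nopage	-> do_anonymous_page()
 *	!pte_present, pte_none, has ->nopage	-> do_no_page()
 *	!pte_present, pte_file			-> do_file_page()
 *	!pte_present, otherwise (swap entry)	-> do_swap_page()
 *	present, write fault, !pte_write	-> do_wp_page()
 *	otherwise				-> mark young (and dirty on
 *						   write), update caches, done
 */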
2035
2036 /*
2037  * By the time we get here, we already hold the mm semaphore
2038  */
2039 int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
2040                 unsigned long address, int write_access)
2041 {
2042         pgd_t *pgd;
2043         pud_t *pud;
2044         pmd_t *pmd;
2045         pte_t *pte;
2046
2047         __set_current_state(TASK_RUNNING);
2048
2049         inc_page_state(pgfault);
2050
2051         if (unlikely(is_vm_hugetlb_page(vma)))
2052                 return hugetlb_fault(mm, vma, address, write_access);
2053
2054         /*
2055          * We need the page table lock to synchronize with kswapd
2056          * and the SMP-safe atomic PTE updates.
2057          */
2058         pgd = pgd_offset(mm, address);
2059         spin_lock(&mm->page_table_lock);
2060
2061         pud = pud_alloc(mm, pgd, address);
2062         if (!pud)
2063                 goto oom;
2064
2065         pmd = pmd_alloc(mm, pud, address);
2066         if (!pmd)
2067                 goto oom;
2068
2069         pte = pte_alloc_map(mm, pmd, address);
2070         if (!pte)
2071                 goto oom;
2072         
2073         return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
2074
2075  oom:
2076         spin_unlock(&mm->page_table_lock);
2077         return VM_FAULT_OOM;
2078 }
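
/*
 * Note on the walk above: any missing pud/pmd/pte level is allocated on the
 * way down, so the first fault on an untouched address builds the page-table
 * spine and then lets handle_pte_fault() decide what to install.  A sketch of
 * how an architecture's fault handler typically gets here (illustrative,
 * assuming mmap_sem is already held for read and the vma checks out):
 *
 *	vma = find_vma(mm, address);
 *	...
 *	ret = handle_mm_fault(mm, vma, address, write_access);
 */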
2079
2080 #ifndef __PAGETABLE_PUD_FOLDED
2081 /*
2082  * Allocate page upper directory.
2083  * We've already handled the fast-path in-line.
2084  */
2085 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
2086 {
2087         pud_t *new;
2088
2089         if (mm != &init_mm)             /* Temporary bridging hack */
2090                 spin_unlock(&mm->page_table_lock);
2091         new = pud_alloc_one(mm, address);
2092         if (!new) {
2093                 if (mm != &init_mm)     /* Temporary bridging hack */
2094                         spin_lock(&mm->page_table_lock);
2095                 return -ENOMEM;
2096         }
2097
2098         spin_lock(&mm->page_table_lock);
2099         if (pgd_present(*pgd))          /* Another has populated it */
2100                 pud_free(new);
2101         else
2102                 pgd_populate(mm, pgd, new);
2103         if (mm == &init_mm)             /* Temporary bridging hack */
2104                 spin_unlock(&mm->page_table_lock);
2105         return 0;
2106 }
2107 #endif /* __PAGETABLE_PUD_FOLDED */
2108
2109 #ifndef __PAGETABLE_PMD_FOLDED
2110 /*
2111  * Allocate page middle directory.
2112  * We've already handled the fast-path in-line.
2113  */
2114 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
2115 {
2116         pmd_t *new;
2117
2118         if (mm != &init_mm)             /* Temporary bridging hack */
2119                 spin_unlock(&mm->page_table_lock);
2120         new = pmd_alloc_one(mm, address);
2121         if (!new) {
2122                 if (mm != &init_mm)     /* Temporary bridging hack */
2123                         spin_lock(&mm->page_table_lock);
2124                 return -ENOMEM;
2125         }
2126
2127         spin_lock(&mm->page_table_lock);
2128 #ifndef __ARCH_HAS_4LEVEL_HACK
2129         if (pud_present(*pud))          /* Another has populated it */
2130                 pmd_free(new);
2131         else
2132                 pud_populate(mm, pud, new);
2133 #else
2134         if (pgd_present(*pud))          /* Another has populated it */
2135                 pmd_free(new);
2136         else
2137                 pgd_populate(mm, pud, new);
2138 #endif /* __ARCH_HAS_4LEVEL_HACK */
2139         if (mm == &init_mm)             /* Temporary bridging hack */
2140                 spin_unlock(&mm->page_table_lock);
2141         return 0;
2142 }
2143 #endif /* __PAGETABLE_PMD_FOLDED */
2144
2145 int make_pages_present(unsigned long addr, unsigned long end)
2146 {
2147         int ret, len, write;
2148         struct vm_area_struct * vma;
2149
2150         vma = find_vma(current->mm, addr);
2151         if (!vma)
2152                 return -1;
2153         write = (vma->vm_flags & VM_WRITE) != 0;
2154         if (addr >= end)
2155                 BUG();
2156         if (end > vma->vm_end)
2157                 BUG();
2158         len = (end+PAGE_SIZE-1)/PAGE_SIZE-addr/PAGE_SIZE;
2159         ret = get_user_pages(current, current->mm, addr,
2160                         len, write, 0, NULL, NULL);
2161         if (ret < 0)
2162                 return ret;
2163         return ret == len ? 0 : -1;
2164 }
2165
2166 /* 
2167  * Map a vmalloc()-space virtual address to the physical page.
2168  */
2169 struct page * vmalloc_to_page(void * vmalloc_addr)
2170 {
2171         unsigned long addr = (unsigned long) vmalloc_addr;
2172         struct page *page = NULL;
2173         pgd_t *pgd = pgd_offset_k(addr);
2174         pud_t *pud;
2175         pmd_t *pmd;
2176         pte_t *ptep, pte;
2177   
2178         if (!pgd_none(*pgd)) {
2179                 pud = pud_offset(pgd, addr);
2180                 if (!pud_none(*pud)) {
2181                         pmd = pmd_offset(pud, addr);
2182                         if (!pmd_none(*pmd)) {
2183                                 ptep = pte_offset_map(pmd, addr);
2184                                 pte = *ptep;
2185                                 if (pte_present(pte))
2186                                         page = pte_page(pte);
2187                                 pte_unmap(ptep);
2188                         }
2189                 }
2190         }
2191         return page;
2192 }
2193
2194 EXPORT_SYMBOL(vmalloc_to_page);
2195
2196 /*
2197  * Map a vmalloc()-space virtual address to the physical page frame number.
2198  */
2199 unsigned long vmalloc_to_pfn(void * vmalloc_addr)
2200 {
2201         return page_to_pfn(vmalloc_to_page(vmalloc_addr));
2202 }
2203
2204 EXPORT_SYMBOL(vmalloc_to_pfn);
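
/*
 * Typical use (illustrative): a driver that vmalloc()s a buffer and wants to
 * mmap it to userspace resolves each page at fault time, along the lines of:
 *
 *	page = vmalloc_to_page(buf + (address - vma->vm_start));
 *	get_page(page);
 *	return page;
 */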
2205
2206 #if !defined(__HAVE_ARCH_GATE_AREA)
2207
2208 #if defined(AT_SYSINFO_EHDR)
2209 static struct vm_area_struct gate_vma;
2210
2211 static int __init gate_vma_init(void)
2212 {
2213         gate_vma.vm_mm = NULL;
2214         gate_vma.vm_start = FIXADDR_USER_START;
2215         gate_vma.vm_end = FIXADDR_USER_END;
2216         gate_vma.vm_page_prot = PAGE_READONLY;
2217         gate_vma.vm_flags = VM_RESERVED;
2218         return 0;
2219 }
2220 __initcall(gate_vma_init);
2221 #endif
2222
2223 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
2224 {
2225 #ifdef AT_SYSINFO_EHDR
2226         return &gate_vma;
2227 #else
2228         return NULL;
2229 #endif
2230 }
2231
2232 int in_gate_area_no_task(unsigned long addr)
2233 {
2234 #ifdef AT_SYSINFO_EHDR
2235         if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
2236                 return 1;
2237 #endif
2238         return 0;
2239 }
2240
2241 #endif  /* __HAVE_ARCH_GATE_AREA */