/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

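/*
 * One array of address_spaces per swap type: each address_space covers
 * SWAP_ADDRESS_SPACE_PAGES slots, splitting the swap cache radix tree
 * and its tree_lock to reduce contention.  Published and retired under
 * RCU by init_swap_address_space() / exit_swap_address_space() below.
 */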
struct address_space *swapper_spaces[MAX_SWAPFILES];
static unsigned int nr_swapper_spaces[MAX_SWAPFILES];

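/* Bump one field of the swap_cache_info statistics structure below. */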
#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

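/*
 * Sum the pages cached in every per-type swap address space.  Runs
 * under rcu_read_lock() because exit_swap_address_space() frees the
 * spaces only after an RCU grace period.
 */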
unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;

	rcu_read_lock();
	for (i = 0; i < MAX_SWAPFILES; i++) {
		/*
		 * The corresponding entries in nr_swapper_spaces and
		 * swapper_spaces will be reused only after at least
		 * one grace period.  So they cannot belong to different
		 * uses.
		 */
		nr = nr_swapper_spaces[i];
		spaces = rcu_dereference(swapper_spaces[i]);
		if (!nr || !spaces)
			continue;
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
	}
	rcu_read_unlock();
	return ret;
}

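/*
 * Count of readahead pages that were later found in the swap cache;
 * consumed (and reset) by swapin_nr_pages() to size the next window.
 */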
static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	get_page(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
				  swp_offset(entry), page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_node_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only a context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache(), so add_to_swap_cache()
		 * never returns -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		put_page(page);
	}

	return error;
}

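/*
 * Like __add_to_swap_cache(), but preloads the radix tree first so the
 * insertion under tree_lock cannot fail on node allocation.
 */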
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, swp_offset(entry));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list to pass to split_huge_page_to_list() if the page turns
 *	out to be a transparent huge page that must be split first
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Returns 1 on success, 0 on failure.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (mem_cgroup_try_charge_swap(page, entry)) {
		swapcache_free(entry);
		return 0;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}
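
/*
 * Sketch of the typical caller (a simplified shrink_page_list()-style
 * reclaim path; details vary by kernel version):
 *
 *	if (PageAnon(page) && !PageSwapCache(page))
 *		if (!add_to_swap(page, page_list))
 *			goto activate_locked;
 */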

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list;
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	put_page(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Drop a reference to the page, freeing any swap cache associated with
 * it if we were the last user.  (The globally shared huge zero page is
 * the one exception: its reference is never dropped here.)
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}

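/*
 * Look up @entry in the swap cache, or allocate a page and claim the
 * slot's SWAP_HAS_CACHE bit for it.  *new_page_allocated tells the
 * caller whether it must start the read itself; the loop retries while
 * another context holds SWAP_HAS_CACHE but has not yet inserted its
 * page into the swap cache.
 */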
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, swp_offset(entry));
		if (found_page)
			break;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swapoff, when swap_slot_cache is disabled,
		 * we have to handle the race between putting a
		 * swap entry into the swap cache and marking the swap
		 * slot as SWAP_HAS_CACHE.  That is handled later in this
		 * function; otherwise swapoff would be aborted when we
		 * return NULL here.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_maybe_preload() while we can still wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage);

	return retpage;
}
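
/*
 * Typical use, modelled loosely on a do_swap_page()-style fault handler
 * (a sketch, not a verbatim caller):
 *
 *	page = lookup_swap_cache(entry);
 *	if (!page)
 *		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
 *					vma, address);
 */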

/*
 * Decide how many pages to read around @offset: grow the window when
 * readahead pages are being hit, fall back to a single page unless the
 * access looks sequential, never shrink by more than half at a time,
 * and cap the result at 1 << page_cluster.
 */
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on vma->vm_mm's mmap_sem if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
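	/*
	 * Example with hypothetical numbers: an 8-page window (mask == 7)
	 * around offset 21 reads slots 16..23.
	 */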
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}

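/*
 * Allocate one struct address_space per SWAP_ADDRESS_SPACE_PAGES chunk
 * of the swap device, so lookups and tree_lock contention are spread
 * across several smaller radix trees.
 */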
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvzalloc(sizeof(struct address_space) * nr, GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		INIT_RADIX_TREE(&space->page_tree, GFP_ATOMIC|__GFP_NOWARN);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
		spin_lock_init(&space->tree_lock);
	}
	nr_swapper_spaces[type] = nr;
	rcu_assign_pointer(swapper_spaces[type], spaces);

	return 0;
}

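/*
 * Tear down the spaces published by init_swap_address_space().  Lockless
 * readers such as total_swapcache_pages() run under RCU, hence the
 * synchronize_rcu() before kvfree().
 */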
void exit_swap_address_space(unsigned int type)
{
	struct address_space *spaces;

	spaces = swapper_spaces[type];
	nr_swapper_spaces[type] = 0;
	rcu_assign_pointer(swapper_spaces[type], NULL);
	synchronize_rcu();
	kvfree(spaces);
}