diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 58f6c78f1dad313dc7aa9b2ec31ddab361335fe1..13a174006b91234c395d186e6bc9e77319470be0 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -263,7 +263,8 @@ static int refill_swap_slots_cache(struct swap_slots_cache *cache)
 
        cache->cur = 0;
        if (swap_slot_cache_active)
-               cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, cache->slots);
+               cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, false,
+                                          cache->slots);
 
        return cache->nr;
 }
@@ -272,11 +273,11 @@ int free_swap_slot(swp_entry_t entry)
 {
        struct swap_slots_cache *cache;
 
-       cache = &get_cpu_var(swp_slots);
+       cache = raw_cpu_ptr(&swp_slots);
        if (use_swap_slot_cache && cache->slots_ret) {
                spin_lock_irq(&cache->free_lock);
                /* Swap slots cache may be deactivated before acquiring lock */
-               if (!use_swap_slot_cache) {
+               if (!use_swap_slot_cache || !cache->slots_ret) {
                        spin_unlock_irq(&cache->free_lock);
                        goto direct_free;
                }
@@ -296,16 +297,23 @@ int free_swap_slot(swp_entry_t entry)
 direct_free:
                swapcache_free_entries(&entry, 1);
        }
-       put_cpu_var(swp_slots);
 
        return 0;
 }
 
-swp_entry_t get_swap_page(void)
+swp_entry_t get_swap_page(struct page *page)
 {
        swp_entry_t entry, *pentry;
        struct swap_slots_cache *cache;
 
+       entry.val = 0;
+
+       if (PageTransHuge(page)) {
+               if (IS_ENABLED(CONFIG_THP_SWAP))
+                       get_swap_pages(1, true, &entry);
+               return entry;
+       }
+
        /*
         * Preemption is allowed here, because we may sleep
         * in refill_swap_slots_cache().  But it is safe, because
@@ -317,7 +325,6 @@ swp_entry_t get_swap_page(void)
         */
        cache = raw_cpu_ptr(&swp_slots);
 
-       entry.val = 0;
        if (check_cache_active()) {
                mutex_lock(&cache->alloc_lock);
                if (cache->slots) {
@@ -337,7 +344,7 @@ repeat:
                        return entry;
        }
 
-       get_swap_pages(1, &entry);
+       get_swap_pages(1, false, &entry);
 
        return entry;
 }
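
For context, a minimal sketch of how the changed call sites fit together. It assumes get_swap_pages() was extended with a "cluster" flag so a transparent huge page can be given a whole swap cluster at once; the prototype and the alloc_swap_entry() helper below are illustrative only, derived from the call sites in the hunks above, and are not part of this patch.

	#include <linux/swap.h>		/* swp_entry_t, swap allocation API */
	#include <linux/page-flags.h>	/* PageTransHuge() */

	/* Assumed prototype this diff relies on (see the call sites above). */
	int get_swap_pages(int n_goal, bool cluster, swp_entry_t swp_entries[]);

	/* Hypothetical caller mirroring the THP fast path added to get_swap_page(). */
	static swp_entry_t alloc_swap_entry(struct page *page)
	{
		swp_entry_t entry = { .val = 0 };

		if (PageTransHuge(page)) {
			/* Ask for a whole cluster only when THP swap is built in. */
			if (IS_ENABLED(CONFIG_THP_SWAP))
				get_swap_pages(1, true, &entry);
			return entry;
		}

		/* Order-0 page: a single slot, as in the slow path of get_swap_page(). */
		get_swap_pages(1, false, &entry);
		return entry;
	}

As in the patch itself, the THP branch leaves entry.val at 0 when CONFIG_THP_SWAP is not enabled, so the caller sees an allocation failure rather than falling through to the per-CPU slot cache.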