// SPDX-License-Identifier: GPL-2.0
/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate the swap slots from the global pool and put
 * them into local per-cpu caches.  This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also the opportunity to simply return a slot
 * to the local cache without needing to acquire the swap_info
 * lock.  We do not reuse the returned slots directly but
 * move them back to the global pool in a batch.  This
 * allows the slots to coalesce, reducing fragmentation.
 *
 * An allocated swap entry is marked with the SWAP_HAS_CACHE
 * flag in its swap_map count, which prevents it from being
 * allocated again from the global pool.
 *
 * The swap slots cache is protected by a mutex rather than
 * a spin lock, because we can sleep while searching for
 * slots with scan_swap_map().
 */

#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>

static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool	swap_slot_cache_active;
bool	swap_slot_cache_enabled;
static bool	swap_slot_cache_initialized;
static DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
static DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);
static void deactivate_swap_slots_cache(void);
static void reactivate_swap_slots_cache(void);

#define use_swap_slot_cache (swap_slot_cache_active && swap_slot_cache_enabled)
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}

static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	mutex_unlock(&swap_slots_cache_mutex);
}
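/*
 * Illustrative pairing of the two entry points below, as used by
 * swapoff in mm/swapfile.c (a sketch only, not a call made from
 * this file):
 *
 *	disable_swap_slots_cache_lock();    <- drain all caches and keep
 *	                                       swap_slots_cache_enable_mutex held
 *	... remove the swap device ...
 *	reenable_swap_slots_cache_unlock(); <- re-evaluate enablement and
 *	                                       drop the mutex
 */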
/* Must not be called with cpu hot plug lock */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		get_online_cpus();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		put_online_cpus();
	}
}

static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

static bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if global pool of swap slots is too low, deactivate the cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}
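/*
 * Worked example of the hysteresis in check_cache_active(), assuming
 * the values in <linux/swap_slots.h> (SWAP_SLOTS_CACHE_SIZE ==
 * SWAP_BATCH == 64, activate threshold 5x and deactivate threshold
 * 2x the cache size): with 4 online cpus, the caches switch on once
 * free swap exceeds 4 * 320 = 1280 pages and switch off once it
 * drops below 4 * 128 = 512 pages.  The gap between the two
 * thresholds keeps the cache from flapping on and off when free
 * swap hovers near a single cutoff.
 */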
static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;

	/*
	 * Do allocation outside swap_slots_cache_mutex
	 * as kvcalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	slots = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvcalloc(SWAP_SLOTS_CACHE_SIZE, sizeof(swp_entry_t),
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret) {
		/* cache already allocated */
		mutex_unlock(&swap_slots_cache_mutex);

		kvfree(slots);
		kvfree(slots_ret);

		return 0;
	}

	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	/*
	 * We initialized alloc_lock and free_lock earlier.  We use
	 * !cache->slots or !cache->slots_ret to know if it is safe to
	 * acquire the corresponding lock and use the cache.  The memory
	 * barrier below ensures the locks are initialized before the
	 * slot arrays become visible.
	 */
	mb();
	cache->slots = slots;
	cache->slots_ret = slots_ret;
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;

	cache = &per_cpu(swp_slots, cpu);
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		kvfree(slots);
	}
}
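/*
 * Note on drain_slots_cache_cpu() above: the slots_ret array is only
 * detached under free_lock; the actual kvfree() happens after the
 * lock is dropped.  kvfree() can take the vfree() path, which may
 * sleep, so it must not be called under a spinlock with interrupts
 * disabled.
 */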
static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 *	1) swapoff, when we have to make sure no
	 *	   left over slots are in cache when we remove
	 *	   a swap device;
	 *	2) disabling of swap slot cache, when we run low
	 *	   on swap slots when allocating memory and need
	 *	   to return swap slots to global pool.
	 *
	 * We cannot acquire cpu hot plug lock here as
	 * this function can be invoked in the cpu
	 * hot plug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *   -> memory allocation -> direct reclaim -> get_swap_page
	 *   -> drain_swap_slots_cache
	 *
	 * Hence the loop over the currently online cpus below could
	 * miss a cpu that is being brought online but is not yet
	 * marked as online.  That is okay, as we do not schedule and
	 * run anything on a cpu before it has been marked online.
	 * Hence, we will not fill any swap slots in the slots cache
	 * of such a cpu, and there are no slots on it that need to
	 * be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}

static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

void enable_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	if (!swap_slot_cache_initialized) {
		int ret;

		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
					alloc_swap_slot_cache, free_slot_cache);
		if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
				       "without swap slots cache.\n", __func__))
			goto out_unlock;

		swap_slot_cache_initialized = true;
	}

	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

/* called with the swap slot cache's alloc_lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE,
					   cache->slots, 1);

	return cache->nr;
}
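/*
 * Free path below: returned entries accumulate in the per-cpu
 * slots_ret array and are handed back to the global pool in a single
 * swapcache_free_entries() batch once SWAP_SLOTS_CACHE_SIZE entries
 * have piled up.  This is the batching described in the header
 * comment: returned entries are never re-allocated directly out of
 * slots_ret.
 */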
int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	cache = raw_cpu_ptr(&swp_slots);
	if (likely(use_swap_slot_cache && cache->slots_ret)) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache || !cache->slots_ret) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to the global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in the global pool.
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}

	return 0;
}

swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry;
	struct swap_slots_cache *cache;

	entry.val = 0;

	if (PageTransHuge(page)) {
		if (IS_ENABLED(CONFIG_THP_SWAP))
			get_swap_pages(1, &entry, HPAGE_PMD_NR);
		goto out;
	}

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache().  But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	if (likely(check_cache_active() && cache->slots)) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				entry = cache->slots[cache->cur];
				cache->slots[cache->cur++].val = 0;
				cache->nr--;
			} else if (refill_swap_slots_cache(cache)) {
				goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			goto out;
	}

	get_swap_pages(1, &entry, 1);
out:
	if (mem_cgroup_try_charge_swap(page, entry)) {
		put_swap_page(page, entry);
		entry.val = 0;
	}
	return entry;
}