/*
 * Manage cache of swap slots to be used for and returned from
 * swap.
 *
 * Copyright(c) 2016 Intel Corporation.
 *
 * Author: Tim Chen <tim.c.chen@linux.intel.com>
 *
 * We allocate the swap slots from the global pool and put
 * them into local per cpu caches.  This has the advantage
 * of not needing to acquire the swap_info lock every time
 * we need a new slot.
 *
 * There is also the opportunity to simply return slots to
 * the local cache without needing to acquire the swap_info
 * lock.  We do not reuse the returned slots directly but
 * move them back to the global pool in a batch.  This
 * allows the slots to coalesce and reduces fragmentation.
 *
 * The swap entry allocated is marked with the SWAP_HAS_CACHE
 * flag in its swap_map count, which prevents it from being
 * allocated again from the global pool.
 *
 * The swap slots cache is protected by a mutex instead of
 * a spin lock because we can possibly sleep when we search
 * for slots with scan_swap_map.
 */

#include <linux/swap_slots.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>
#include <linux/mm.h>

#ifdef CONFIG_SWAP

static DEFINE_PER_CPU(struct swap_slots_cache, swp_slots);
static bool swap_slot_cache_active;
bool swap_slot_cache_enabled;
static bool swap_slot_cache_initialized;
DEFINE_MUTEX(swap_slots_cache_mutex);
/* Serialize swap slots cache enable/disable operations */
DEFINE_MUTEX(swap_slots_cache_enable_mutex);

static void __drain_swap_slots_cache(unsigned int type);
static void deactivate_swap_slots_cache(void);
static void reactivate_swap_slots_cache(void);

#define use_swap_slot_cache (swap_slot_cache_active && \
		swap_slot_cache_enabled && swap_slot_cache_initialized)
#define SLOTS_CACHE 0x1
#define SLOTS_CACHE_RET 0x2

static void deactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = false;
	__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
	mutex_unlock(&swap_slots_cache_mutex);
}

static void reactivate_swap_slots_cache(void)
{
	mutex_lock(&swap_slots_cache_mutex);
	swap_slot_cache_active = true;
	mutex_unlock(&swap_slots_cache_mutex);
}

/* Must not be called with cpu hot plug lock */
void disable_swap_slots_cache_lock(void)
{
	mutex_lock(&swap_slots_cache_enable_mutex);
	swap_slot_cache_enabled = false;
	if (swap_slot_cache_initialized) {
		/* serialize with cpu hotplug operations */
		get_online_cpus();
		__drain_swap_slots_cache(SLOTS_CACHE|SLOTS_CACHE_RET);
		put_online_cpus();
	}
}

static void __reenable_swap_slots_cache(void)
{
	swap_slot_cache_enabled = has_usable_swap();
}

void reenable_swap_slots_cache_unlock(void)
{
	__reenable_swap_slots_cache();
	mutex_unlock(&swap_slots_cache_enable_mutex);
}

static bool check_cache_active(void)
{
	long pages;

	if (!swap_slot_cache_enabled || !swap_slot_cache_initialized)
		return false;

	pages = get_nr_swap_pages();
	if (!swap_slot_cache_active) {
		if (pages > num_online_cpus() *
		    THRESHOLD_ACTIVATE_SWAP_SLOTS_CACHE)
			reactivate_swap_slots_cache();
		goto out;
	}

	/* if the global pool of free swap slots is too low, deactivate the cache */
	if (pages < num_online_cpus() * THRESHOLD_DEACTIVATE_SWAP_SLOTS_CACHE)
		deactivate_swap_slots_cache();
out:
	return swap_slot_cache_active;
}

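/*
 * cpu hotplug "online" callback, registered from enable_swap_slots_cache():
 * allocate this cpu's array of slots handed out by get_swap_page() and the
 * array of slots collected by free_swap_slot().  Returns 0 if the arrays
 * are already allocated for this cpu.
 */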
static int alloc_swap_slot_cache(unsigned int cpu)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots, *slots_ret;

	/*
	 * Do allocation outside swap_slots_cache_mutex
	 * as kvzalloc could trigger reclaim and get_swap_page,
	 * which can lock swap_slots_cache_mutex.
	 */
	slots = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
			 GFP_KERNEL);
	if (!slots)
		return -ENOMEM;

	slots_ret = kvzalloc(sizeof(swp_entry_t) * SWAP_SLOTS_CACHE_SIZE,
			     GFP_KERNEL);
	if (!slots_ret) {
		kvfree(slots);
		return -ENOMEM;
	}

	mutex_lock(&swap_slots_cache_mutex);
	cache = &per_cpu(swp_slots, cpu);
	if (cache->slots || cache->slots_ret)
		/* cache already allocated */
		goto out;
	if (!cache->lock_initialized) {
		mutex_init(&cache->alloc_lock);
		spin_lock_init(&cache->free_lock);
		cache->lock_initialized = true;
	}
	cache->nr = 0;
	cache->cur = 0;
	cache->n_ret = 0;
	cache->slots = slots;
	slots = NULL;
	cache->slots_ret = slots_ret;
	slots_ret = NULL;
out:
	mutex_unlock(&swap_slots_cache_mutex);
	if (slots)
		kvfree(slots);
	if (slots_ret)
		kvfree(slots_ret);
	return 0;
}

static void drain_slots_cache_cpu(unsigned int cpu, unsigned int type,
				  bool free_slots)
{
	struct swap_slots_cache *cache;
	swp_entry_t *slots = NULL;

	cache = &per_cpu(swp_slots, cpu);
	if ((type & SLOTS_CACHE) && cache->slots) {
		mutex_lock(&cache->alloc_lock);
		swapcache_free_entries(cache->slots + cache->cur, cache->nr);
		cache->cur = 0;
		cache->nr = 0;
		if (free_slots && cache->slots) {
			kvfree(cache->slots);
			cache->slots = NULL;
		}
		mutex_unlock(&cache->alloc_lock);
	}
	if ((type & SLOTS_CACHE_RET) && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		swapcache_free_entries(cache->slots_ret, cache->n_ret);
		cache->n_ret = 0;
		if (free_slots && cache->slots_ret) {
			slots = cache->slots_ret;
			cache->slots_ret = NULL;
		}
		spin_unlock_irq(&cache->free_lock);
		if (slots)
			kvfree(slots);
	}
}

static void __drain_swap_slots_cache(unsigned int type)
{
	unsigned int cpu;

	/*
	 * This function is called during
	 *	1) swapoff, when we have to make sure no
	 *	   left over slots are in cache when we remove
	 *	   a swap device;
	 *	2) disabling of the swap slot cache, when we run low
	 *	   on swap slots while allocating memory and need
	 *	   to return swap slots to the global pool.
	 *
	 * We cannot acquire the cpu hot plug lock here as
	 * this function can be invoked in the cpu
	 * hot plug path:
	 * cpu_up -> lock cpu_hotplug -> cpu hotplug state callback
	 *   -> memory allocation -> direct reclaim -> get_swap_page
	 *   -> drain_swap_slots_cache
	 *
	 * Hence the loop over currently online cpus below could miss a cpu
	 * that is being brought online but not yet marked as online.
	 * That is okay, as we do not schedule and run anything on a
	 * cpu before it has been marked online.  Hence, we will not
	 * have filled any swap slots in the slots cache of such a cpu,
	 * and there are no slots on such a cpu that need to be drained.
	 */
	for_each_online_cpu(cpu)
		drain_slots_cache_cpu(cpu, type, false);
}

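/*
 * cpu hotplug teardown callback: drain both per-cpu caches for this cpu
 * and free the slot arrays themselves.
 */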
static int free_slot_cache(unsigned int cpu)
{
	mutex_lock(&swap_slots_cache_mutex);
	drain_slots_cache_cpu(cpu, SLOTS_CACHE | SLOTS_CACHE_RET, true);
	mutex_unlock(&swap_slots_cache_mutex);
	return 0;
}

int enable_swap_slots_cache(void)
{
	int ret = 0;

	mutex_lock(&swap_slots_cache_enable_mutex);
	if (swap_slot_cache_initialized) {
		__reenable_swap_slots_cache();
		goto out_unlock;
	}

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "swap_slots_cache",
				alloc_swap_slot_cache, free_slot_cache);
	if (WARN_ONCE(ret < 0, "Cache allocation failed (%s), operating "
		      "without swap slots cache.\n", __func__))
		goto out_unlock;

	swap_slot_cache_initialized = true;
	__reenable_swap_slots_cache();
out_unlock:
	mutex_unlock(&swap_slots_cache_enable_mutex);
	return 0;
}

/* called with swap slot cache's alloc lock held */
static int refill_swap_slots_cache(struct swap_slots_cache *cache)
{
	if (!use_swap_slot_cache || cache->nr)
		return 0;

	cache->cur = 0;
	if (swap_slot_cache_active)
		cache->nr = get_swap_pages(SWAP_SLOTS_CACHE_SIZE, false,
					   cache->slots);

	return cache->nr;
}

int free_swap_slot(swp_entry_t entry)
{
	struct swap_slots_cache *cache;

	cache = &get_cpu_var(swp_slots);
	if (use_swap_slot_cache && cache->slots_ret) {
		spin_lock_irq(&cache->free_lock);
		/* Swap slots cache may be deactivated before acquiring lock */
		if (!use_swap_slot_cache) {
			spin_unlock_irq(&cache->free_lock);
			goto direct_free;
		}
		if (cache->n_ret >= SWAP_SLOTS_CACHE_SIZE) {
			/*
			 * Return slots to global pool.
			 * The current swap_map value is SWAP_HAS_CACHE.
			 * Set it to 0 to indicate it is available for
			 * allocation in global pool
			 */
			swapcache_free_entries(cache->slots_ret, cache->n_ret);
			cache->n_ret = 0;
		}
		cache->slots_ret[cache->n_ret++] = entry;
		spin_unlock_irq(&cache->free_lock);
	} else {
direct_free:
		swapcache_free_entries(&entry, 1);
	}
	put_cpu_var(swp_slots);

	return 0;
}

swp_entry_t get_swap_page(struct page *page)
{
	swp_entry_t entry, *pentry;
	struct swap_slots_cache *cache;

	entry.val = 0;

	if (PageTransHuge(page)) {
		if (IS_ENABLED(CONFIG_THP_SWAP))
			get_swap_pages(1, true, &entry);
		return entry;
	}

	/*
	 * Preemption is allowed here, because we may sleep
	 * in refill_swap_slots_cache().  But it is safe, because
	 * accesses to the per-CPU data structure are protected by the
	 * mutex cache->alloc_lock.
	 *
	 * The alloc path here does not touch cache->slots_ret
	 * so cache->free_lock is not taken.
	 */
	cache = raw_cpu_ptr(&swp_slots);

	if (check_cache_active()) {
		mutex_lock(&cache->alloc_lock);
		if (cache->slots) {
repeat:
			if (cache->nr) {
				pentry = &cache->slots[cache->cur++];
				entry = *pentry;
				pentry->val = 0;
				cache->nr--;
			} else {
				if (refill_swap_slots_cache(cache))
					goto repeat;
			}
		}
		mutex_unlock(&cache->alloc_lock);
		if (entry.val)
			return entry;
	}

	get_swap_pages(1, false, &entry);

	return entry;
}

#endif /* CONFIG_SWAP */