/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	free_hot_cold_page(page, 0);
}

static void put_compound_page(struct page *page)
{
	page = compound_head(page);
	if (put_page_testzero(page)) {
		compound_page_dtor *dtor;

		dtor = get_compound_page_dtor(page);
		(*dtor)(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
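/*
 * Usage sketch (illustrative, not part of this file): a caller such as
 * read_cache_pages() threads pages on page->lru and hands the whole
 * list back in one call.  Variable names here are hypothetical.
 *
 *	LIST_HEAD(pages);
 *	...
 *	list_add(&page->lru, &pages);	// caller holds one ref per page
 *	...
 *	put_pages_list(&pages);		// drops that ref; may free pages
 */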
/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int i;
	int pgmoved = 0;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock(&zone->lru_lock);
			zone = pagezone;
			spin_lock(&zone->lru_lock);
		}
		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
			int lru = page_lru_base_type(page);
			list_move_tail(&page->lru, &zone->lru[lru].list);
			pgmoved++;
		}
	}
	if (zone)
		spin_unlock(&zone->lru_lock);
	__count_vm_events(PGROTATED, pgmoved);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
	struct zone_reclaim_stat *memcg_reclaim_stat;

	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;

	if (!memcg_reclaim_stat)
		return;

	memcg_reclaim_stat->recent_scanned[file]++;
	if (rotated)
		memcg_reclaim_stat->recent_rotated[file]++;
}

/*
 * FIXME: speed this up?
 */
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);
		del_page_from_lru_list(zone, page, lru);

		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, file, 1);
	}
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}

EXPORT_SYMBOL(mark_page_accessed);

void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		____pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}
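/*
 * Usage sketch (illustrative): callers normally reach __lru_cache_add()
 * through thin inline wrappers in include/linux/swap.h that pick the
 * lru index for them, roughly of this shape:
 *
 *	static inline void lru_cache_add_file(struct page *page)
 *	{
 *		__lru_cache_add(page, LRU_INACTIVE_FILE);
 *	}
 *
 * Batching through the per-cpu pagevec means zone->lru_lock is taken
 * once per PAGEVEC_SIZE pages rather than once per page.
 */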
/**
 * lru_cache_add_lru - add a page to a page list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through e.g. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			____pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

void lru_add_drain(void)
{
	drain_cpu_pagevecs(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}
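/*
 * Usage sketch (illustrative): paths that must see every page on the
 * real LRU lists before scanning or isolating them, e.g. the mlock
 * and page-migration paths, drain first:
 *
 *	lru_add_drain_all();	// flush every CPU's pending pagevecs
 *	... walk or isolate LRU pages ...
 *
 * lru_add_drain() alone only flushes the calling CPU's pagevecs.
 */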
/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(__pagevec_release);
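/*
 * Usage sketch (illustrative): the common fill-then-release pagevec
 * pattern seen in truncate/invalidate style loops.  "mapping" and
 * "next" are hypothetical caller state; the elided body processes the
 * pages and advances next.  pagevec_release() is the pagevec.h inline
 * that lands in __pagevec_release() above when the pagevec is non-empty.
 *
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
 *		... process pvec.pages[0 .. pagevec_count(&pvec) - 1],
 *		    advancing next ...
 *		pagevec_release(&pvec);
 *	}
 */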
/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	int i;
	struct zone *zone = NULL;

	VM_BUG_ON(is_unevictable_lru(lru));

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);
		int file;
		int active;

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(PageUnevictable(page));
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		active = is_active_lru(lru);
		file = is_file_lru(lru);
		if (active)
			SetPageActive(page);
		update_page_reclaim_stat(zone, page, file, active);
		add_page_to_lru_list(zone, page, lru);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

EXPORT_SYMBOL(____pagevec_lru_add);

/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (page_has_private(page) && trylock_page(page)) {
			if (page_has_private(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup);

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}

EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more
	 */
}
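/*
 * Note (illustrative): page_cluster is an exponent, not a page count.
 * Swap readahead clusters around the faulting entry in units of
 * 1 << page_cluster pages, so the defaults above mean 4-page clusters
 * on machines under 16MB and 8-page clusters otherwise.  The value is
 * tunable at runtime via /proc/sys/vm/page-cluster.
 */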